diff --git a/.env.template b/.env.template new file mode 100644 index 000000000..167ff5ccc --- /dev/null +++ b/.env.template @@ -0,0 +1,9 @@ +TOKEN= +HOST= +ARM_CLIENT_ID= +ARM_CLIENT_SECRET= +ARM_TENANT_ID= +ARM_SUBSCRIPTION_ID= +TEST_MANAGED_RESOURCE_GROUP= +TEST_WORKSPACE_NAME= +TEST_RESOURCE_GROUP= \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..e0abdafbe --- /dev/null +++ b/.gitignore @@ -0,0 +1,323 @@ + +# Created by https://www.gitignore.io/api/go,python,terraform,virtualenv,pycharm+iml,intellij+all,visualstudiocode +# Edit at https://www.gitignore.io/?templates=go,python,terraform,virtualenv,pycharm+iml,intellij+all,visualstudiocode + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +### Go Patch ### +#/vendor/ +/Godeps/ + +### Intellij+all ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### Intellij+all Patch ### +# Ignores the whole .idea folder and all .iml files +# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 + +.idea/ + +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + +*.iml +modules.xml +.idea/misc.xml +*.ipr + +# Sonarlint plugin +.idea/sonarlint + +### PyCharm+iml ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff + +# Generated files + +# Sensitive or high-churn files + +# Gradle + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake + +# Mongo Explorer plugin + +# File-based project format + +# IntelliJ + +# mpeltonen/sbt-idea plugin + +# JIRA plugin + +# Cursive Clojure plugin + +# Crashlytics plugin (for Android Studio and IntelliJ) + +# Editor-based Rest Client + +# Android studio 3.1+ serialized cache file + +### PyCharm+iml Patch ### +# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 + + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### Terraform ### +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log + +# Ignore any .tfvars files that are generated automatically for each Terraform run. Most +# .tfvars files are managed as part of configuration and so should be included in +# version control. 
+#
+# example.tfvars
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+
+### VirtualEnv ###
+# Virtualenv
+# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
+pyvenv.cfg
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+pip-selfcheck.json
+
+### VisualStudioCode ###
+.vscode/*
+!.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+
+# End of https://www.gitignore.io/api/go,python,terraform,virtualenv,pycharm+iml,intellij+all,visualstudiocode
+/terraform-provider-db
+/.vscode/
+
+# Remove tfvars to prevent secrets from leaking and ignore any terraform files in root folder
+**/*.tfvars
+/*.tf
+.DS_Store
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 000000000..5a9face96
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,53 @@
+default: build
+
+test:
+	@echo "==> Running unit tests..."
+	@gotestsum --raw-command go test -v -json -short -coverprofile=coverage.out ./...
+
+int:
+	@echo "==> Running integration tests..."
+	@gotestsum --raw-command go test -v -json -coverprofile=coverage.out ./...
+
+coverage: test
+	@echo "==> Opening coverage for unit tests..."
+	@go tool cover -html=coverage.out
+
+int-build: int build
+
+build:
+	@echo "==> Building source code with go build..."
+	@go build -mod vendor -v -o terraform-provider-db
+
+fmt:
+	@echo "==> Formatting source code with gofmt..."
+	@go fmt ./...
+
+
+python-setup:
+	@echo "==> Setting up virtual env and installing python libraries..."
+	@python -m pip install virtualenv
+	@cd docs && python -m virtualenv venv
+	@cd docs && source venv/bin/activate && python -m pip install -r requirements.txt
+
+docs:
+	@echo "==> Building Docs ..."
+	@cd docs && source venv/bin/activate && make html
+
+opendocs: docs
+	@echo "==> Opening Docs ..."
+	@cd docs && open build/html/index.html
+
+vendor:
+	@echo "==> Filling vendor folder with library code..."
+	@go mod vendor
+
+# INTEGRATION TESTING WITH TERRAFORM EXAMPLES
+terraform-setup: fmt build
+	@echo "==> Initializing Terraform..."
+	@terraform init
+
+terraform-apply: terraform-setup
+	@echo "==> Running terraform apply..."
+	@TF_LOG_PATH=log.out TF_LOG=debug terraform apply
+
+.PHONY: default test int int-build coverage build fmt python-setup docs opendocs vendor terraform-setup terraform-apply
\ No newline at end of file
diff --git a/README.md b/README.md
index f3ffeb7ac..1fb7680e9 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,129 @@
-# PROJECT NAME
-Standard Project Template for Databricks Labs Projects
+# Databricks Terraform Provider
+## Getting Started
+Please note that there is a Makefile which contains all the commands you need to build, test, and run this project.
-## Project Description
-Short description of project's purpose
+Contributing to this code base requires the following software:
-## Project Support
-Please note that all projects in the /databrickslabs github account are provided for your exploration only, and are not formally supported by Databricks with Service Level Agreements (SLAs). They are provided AS-IS and we do not make any guarantees of any kind.
Please do not submit a support ticket relating to any issues arising from the use of these projects.
+* golang 1.13.X
-Any issues discovered through the use of this project should be filed as GitHub Issues on the Repo. They will be reviewed as time permits, but there are no formal SLAs for support.
+* python 3.6.X
+## Project Components
+### High Level Databricks Client CR[U]D
+The client folder contains the Go SDK for the Databricks REST API. It is kept separate from the Databricks Terraform provider and has its own unit and integration tests. A short usage sketch is included at the end of this section. The client currently supports the following:
-## Building the Project
-Instructions for how to build the project
+#### Clusters
+TODO!
-## Deploying / Installing the Project
-Instructions for how to deploy the project, or install it
+#### Libraries
+* [ ] Create Library Installation
-## Releasing the Project
-Instructions for how to release a version of the project
+* [ ] Get Library Installation (cluster-status API)
-## Using the Project
-Simple examples on how to use the project
+* [ ] Delete Library Installation
+
+> Note: All library manipulation for clusters must be performed while the cluster is in a running state.
+
+#### Jobs API
+TODO!
+
+#### Instance Pools
+* [x] Create Instance Pools
+
+* [x] Read Instance Pools
+
+* [x] Update Instance Pools
+
+* [x] Delete Instance Pools
+
+#### Secrets
+##### Secret Scope
+* [x] Create Secret Scope
+
+* [x] Read Secret Scope*
+
+* [x] Delete Secret Scope
+
+##### Secret
+* [x] Create Secret
+
+* [x] Read Secret*
+
+* [x] Delete Secret
+
+Note: Reading a secret only fetches its metadata; the secret value itself is only accessible from within the Databricks runtime.
+
+##### Secret ACL
+* [x] Create Secret ACL
+
+* [x] Read Secret ACL*
+
+* [x] Delete Secret ACL
+
+#### Token API
+* [ ] Create Token
+
+* [ ] Read Token*
+
+* [ ] Delete (Revoke) Token
+
+#### Workspace (Notebooks) API
+* [ ] Import/create Workspace Path (notebooks)
+
+* [ ] Get Workspace Path (notebooks)
+
+* [ ] Delete Workspace Path (notebooks)
+
+#### SCIM API
+##### Users
+* [ ] Create User
+
+* [ ] Update User
+
+* [ ] Delete User
+
+Note: To update a user, use PATCH for entitlements and PUT for everything else. We may want to manage entitlements as a separate resource to make them easier to handle, since creating the user object and mapping entitlements are two separate activities.
+
+##### Groups
+* [ ] Create Group
+
+* [ ] Get Group*
+
+* [ ] Delete Group
+
+##### Group Member
+* [ ] Create Member
+
+* [ ] Get Member*
+
+* [ ] Delete Member
+
+##### Group Entitlements
+TBD!
+
+#### MLflow API
+TODO!
+
+#### Instance Profiles API (AWS Only)
+* [ ] Create IAM Instance Profile Link
+
+* [ ] Get IAM Instance Profile Link
+
+* [ ] Delete IAM Instance Profile Link
+
+#### DBFS
+* [ ] Create Object from local file path (/dbfs/put)
+
+* [ ] Get Object (metadata; /dbfs/get-status)
+
+* [ ] Delete Object
+
+* For the Read operations marked with an asterisk above there is no direct REST API call, so the implementation lists the items, searches for the requested one, and returns its metadata from the list.
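+
+The sketch below shows, at a high level, how the Go client can be used directly (outside of Terraform). It is a minimal, illustrative example only: it assumes the `HOST` and `TOKEN` environment variables from `.env.template` are set, and it uses the `client` and `client/service` packages from this repository.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+
+	db "github.com/stikkireddy/databricks-tf-provider/client"
+	"github.com/stikkireddy/databricks-tf-provider/client/service"
+)
+
+func main() {
+	// Low-level client options; Host and Token are taken from the environment.
+	option := db.DBClientOption{
+		Host:  os.Getenv("HOST"),
+		Token: os.Getenv("TOKEN"),
+	}
+
+	// DBApiClient exposes typed sub-APIs such as Clusters(), Secrets(), Groups() and InstancePools().
+	var apiClient service.DBApiClient
+	client := apiClient.Init(option)
+
+	// List all clusters in the workspace and print their names and states.
+	clusters, err := client.Clusters().List()
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, c := range clusters {
+		fmt.Println(c.ClusterName, c.State)
+	}
+}
+```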
+
+
+## Testing
+
+### TODOs
+
+* [ ] Integration tests should be run at a client level against both Azure and AWS to maintain SDK parity against both APIs **(currently only on one cloud)**
+* [ ] Terraform acceptance tests should be run against both AWS and Azure to maintain provider parity between both cloud services **(currently only on one cloud)**
\ No newline at end of file
diff --git a/client/client.go b/client/client.go
new file mode 100644
index 000000000..added5b3e
--- /dev/null
+++ b/client/client.go
@@ -0,0 +1,168 @@
+package client
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"github.com/google/go-querystring/query"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+type CloudServiceProvider string
+
+const (
+	AWS   CloudServiceProvider = "AmazonWebServices"
+	Azure CloudServiceProvider = "Azure"
+)
+
+// DBClientOption is used to configure the Databricks Client
+type DBClientOption struct {
+	Host               string
+	Token              string
+	DefaultHeaders     map[string]string
+	InsecureSkipVerify bool
+	TimeoutSeconds     int
+	client             http.Client
+	cloudProvider      CloudServiceProvider
+}
+
+// Init initializes the client
+func (o *DBClientOption) Init() {
+	if o.TimeoutSeconds == 0 {
+		o.TimeoutSeconds = 10
+	}
+	o.client = http.Client{
+		Timeout: time.Duration(o.TimeoutSeconds) * time.Second,
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				InsecureSkipVerify: o.InsecureSkipVerify,
+			},
+		},
+	}
+}
+
+func (o *DBClientOption) getHTTPClient() http.Client {
+	return o.client
+}
+
+func (o *DBClientOption) getAuthHeader() map[string]string {
+	auth := make(map[string]string)
+	auth["Authorization"] = "Bearer " + o.Token
+	auth["Content-Type"] = "application/json"
+	return auth
+}
+
+func (o *DBClientOption) getUserAgentHeader() map[string]string {
+	return map[string]string{
+		"User-Agent": fmt.Sprintf("databricks-sdk-golang-%s", SdkVersion),
+	}
+}
+
+func (o *DBClientOption) getDefaultHeaders() map[string]string {
+	auth := o.getAuthHeader()
+	userAgent := o.getUserAgentHeader()
+
+	defaultHeaders := make(map[string]string)
+	for k, v := range auth {
+		defaultHeaders[k] = v
+	}
+	for k, v := range o.DefaultHeaders {
+		defaultHeaders[k] = v
+	}
+	for k, v := range userAgent {
+		defaultHeaders[k] = v
+	}
+	return defaultHeaders
+}
+
+func (o *DBClientOption) getRequestURI(path string, apiVersion string) (string, error) {
+	var apiVersionString string
+	if apiVersion == "" {
+		apiVersionString = "2.0"
+	} else {
+		apiVersionString = apiVersion
+	}
+
+	parsedURI, err := url.Parse(o.Host)
+	if err != nil {
+		return "", err
+	}
+	requestURI := fmt.Sprintf("%s://%s/api/%s%s", parsedURI.Scheme, parsedURI.Host, apiVersionString, path)
+	return requestURI, nil
+}
+
+// PerformQuery can be used in a client or directly
+func PerformQuery(option DBClientOption, method, path string, apiVersion string, headers map[string]string, marshalJson bool, useRawPath bool, data interface{}) ([]byte, error) {
+
+	var requestURL string
+	var err error
+	if useRawPath {
+		requestURL = path
+	} else {
+		requestURL, err = option.getRequestURI(path, apiVersion)
+		if err != nil {
+			return nil, err
+		}
+	}
+	requestHeaders := option.getDefaultHeaders()
+
+	if len(headers) > 0 {
+		for k, v := range headers {
+			requestHeaders[k] = v
+		}
+	}
+
+	var requestBody []byte
+	if method == "GET" {
+		params, err := query.Values(data)
+		if err != nil {
+			return nil, err
+		}
+		requestURL += "?"
+ params.Encode() + } else { + if marshalJson { + bodyBytes, err := json.Marshal(data) + if err != nil { + return nil, err + } + + requestBody = bodyBytes + log.Println(string(requestBody)) + } else { + requestBody = []byte(data.(string)) + } + } + + client := option.getHTTPClient() + + request, err := http.NewRequest(method, requestURL, bytes.NewBuffer(requestBody)) + if err != nil { + return nil, err + } + for k, v := range requestHeaders { + request.Header.Set(k, v) + } + + resp, err := client.Do(request) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Response from server (%d) %s", resp.StatusCode, string(body)) + } + + return body, nil +} diff --git a/client/model/cluster.go b/client/model/cluster.go new file mode 100644 index 000000000..ea1660f25 --- /dev/null +++ b/client/model/cluster.go @@ -0,0 +1,257 @@ +package model + +import "errors" + +type AutoScale struct { + MinWorkers int32 `json:"min_workers,omitempty"` + MaxWorkers int32 `json:"max_workers,omitempty"` +} + +type AwsAvailability string + +const ( + AwsAvailabilitySpot = "SPOT" + AwsAvailabilityOnDemand = "ON_DEMAND" + AwsAvailabilitySpotWithFallback = "SPOT_WITH_FALLBACK" +) + +func GetAwsAvailability(val string) (AwsAvailability, error) { + switch val { + case "SPOT": + return AwsAvailabilitySpot, nil + case "ON_DEMAND": + return AwsAvailabilityOnDemand, nil + case "SPOT_WITH_FALLBACK": + return AwsAvailabilitySpotWithFallback, nil + } + return "", errors.New("No Match!") +} + +type AzureDiskVolumeType string + +const ( + AzureDiskVolumeTypeStandard = "STANDARD_LRS" + AzureDiskVolumeTypePremium = "PREMIUM_LRS" +) + +func GetAzureDiskVolumeType(val string) (AzureDiskVolumeType, error) { + switch val { + case "STANDARD_LRS": + return AzureDiskVolumeTypeStandard, nil + case "PREMIUM_LRS": + return AzureDiskVolumeTypePremium, nil + } + return "", errors.New("No Match!") +} + +type EbsVolumeType string + +const ( + EbsVolumeTypeGeneralPurposeSsd = "GENERAL_PURPOSE_SSD" + EbsVolumeTypeThroughputOptimizedHdd = "THROUGHPUT_OPTIMIZED_HDD" +) + +func GetEbsVolumeType(val string) (EbsVolumeType, error) { + switch val { + case "GENERAL_PURPOSE_SSD": + return EbsVolumeTypeGeneralPurposeSsd, nil + case "THROUGHPUT_OPTIMIZED_HDD": + return EbsVolumeTypeThroughputOptimizedHdd, nil + } + return "", errors.New("No Match!") +} + +type ClusterState string + +const ( + ClusterStatePending = "PENDING" + ClusterStateRunning = "RUNNING" + ClusterStateRestarting = "RESTARTING" + ClusterStateResizing = "RESIZING" + ClusterStateTerminating = "TERMINATING" + ClusterStateTerminated = "TERMINATED" + ClusterStateError = "ERROR" + ClusterStateUnknown = "UNKNOWN" +) + +var ClusterStateNonRunnable []ClusterState = []ClusterState{ClusterStateTerminating, ClusterStateTerminated, ClusterStateError, ClusterStateUnknown} +var ClusterStateNonTerminating []ClusterState = []ClusterState{ClusterStatePending, ClusterStateRunning, ClusterStateRestarting, ClusterStateResizing} + +func ContainsClusterState(clusterStates []ClusterState, searchState ClusterState) bool { + for _, state := range clusterStates { + if state == searchState { + return true + } + } + return false +} + +type AwsAttributes struct { + FirstOnDemand int32 `json:"first_on_demand,omitempty"` + Availability AwsAvailability `json:"availability,omitempty"` + ZoneID string `json:"zone_id,omitempty"` + InstanceProfileArn string 
`json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` + EbsVolumeType EbsVolumeType `json:"ebs_volume_type,omitempty"` + EbsVolumeCount int32 `json:"ebs_volume_count,omitempty"` + EbsVolumeSize int32 `json:"ebs_volume_size,omitempty"` +} + +type DbfsStorageInfo struct { + Destination string `json:"destination,omitempty"` +} + +type S3StorageInfo struct { + Destination string `json:"destination,omitempty"` + Region string `json:"region,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + CannedACL string `json:"canned_acl,omitempty"` +} + +type StorageInfo struct { + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` + S3 *S3StorageInfo `json:"s3,omitempty"` +} + +type SparkNodeAwsAttributes struct { + IsSpot bool `json:"is_spot,omitempty"` +} + +type SparkNode struct { + PrivateIP string `json:"private_ip,omitempty"` + PublicDNS string `json:"public_dns,omitempty"` + NodeID string `json:"node_id,omitempty"` + InstanceID string `json:"instance_id,omitempty"` + StartTimestamp int64 `json:"start_timestamp,omitempty"` + NodeAwsAttributes *SparkNodeAwsAttributes `json:"node_aws_attributes,omitempty"` + HostPrivateIP string `json:"host_private_ip,omitempty"` +} + +type TerminationCode string + +const ( + TerminationCodeUserRequest = "USER_REQUEST" + TerminationCodeJobFinished = "JOB_FINISHED" + TerminationCodeInactivity = "INACTIVITY" + TerminationCodeCloudProviderShutdown = "CLOUD_PROVIDER_SHUTDOWN" + TerminationCodeCommunicationLost = "COMMUNICATION_LOST" + TerminationCodeCloudProviderLaunchFailure = "CLOUD_PROVIDER_LAUNCH_FAILURE" + TerminationCodeSparkStartupFailure = "SPARK_STARTUP_FAILURE" + TerminationCodeInvalidArgument = "INVALID_ARGUMENT" + TerminationCodeUnexpectedLaunchFailure = "UNEXPECTED_LAUNCH_FAILURE" + TerminationCodeInternalError = "INTERNAL_ERROR" + TerminationCodeInstanceUnreachable = "INSTANCE_UNREACHABLE" + TerminationCodeRequestRejected = "REQUEST_REJECTED" + TerminationCodeInitScriptFailure = "INIT_SCRIPT_FAILURE" + TerminationCodeTrialExpired = "TRIAL_EXPIRED" +) + +type TerminationParameter string + +const ( + TerminationParameterUsername = "username" + TerminationParameterAwsAPIErrorCode = "aws_api_error_code" + TerminationParameterAwsInstanceStateReason = "aws_instance_state_reason" + TerminationParameterAwsSpotRequestStatus = "aws_spot_request_status" + TerminationParameterAwsSpotRequestFaultCode = "aws_spot_request_fault_code" + TerminationParameterAwsImpairedStatusDetails = "aws_impaired_status_details" + TerminationParameterAwsInstanceStatusEvent = "aws_instance_status_event" + TerminationParameterAwsErrorMessage = "aws_error_message" + TerminationParameterDatabricksErrorMessage = "databricks_error_message" + TerminationParameterInactivityDurationMin = "inactivity_duration_min" + TerminationParameterInstanceID = "instance_id" +) + +type TerminationReason struct { + Code TerminationCode `json:"code,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` +} + +type LogSyncStatus struct { + LastAttempted int64 `json:"last_attempted,omitempty"` + LastException string `json:"last_exception,omitempty"` +} + +type ClusterCloudProviderNodeStatus string + +const ( + ClusterCloudProviderNodeStatusNotEnabledOnSubscription = "NotEnabledOnSubscription" + ClusterCloudProviderNodeStatusNotAvailableInRegion = "NotAvailableInRegion" +) + +type 
ClusterCloudProviderNodeInfo struct { + Status []ClusterCloudProviderNodeStatus `json:"status,omitempty"` + AvailableCoreQuota float32 `json:"available_core_quota,omitempty"` + TotalCoreQuota float32 `json:"total_core_quota,omitempty"` +} + +type NodeType struct { + NodeTypeID string `json:"node_type_id,omitempty"` + MemoryMb int32 `json:"memory_mb,omitempty"` + NumCores float32 `json:"num_cores,omitempty"` + Description string `json:"description,omitempty"` + InstanceTypeID string `json:"instance_type_id,omitempty"` + IsDeprecated bool `json:"is_deprecated,omitempty"` + NodeInfo *ClusterCloudProviderNodeInfo `json:"node_info,omitempty"` +} + +type Cluster struct { + ClusterId string `json:"cluster_id,omitempty"` + NumWorkers int32 `json:"num_workers,omitempty"` + Autoscale *AutoScale `json:"autoscale,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + SparkVersion string `json:"spark_version,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty"` + DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` + SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` + InitScripts []StorageInfo `json:"init_scripts,omitempty"` + //TODO: Update docker image portion here + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + IdempotencyToken string `json:"idempotency_token,omitempty"` +} + +type ClusterInfo struct { + NumWorkers int32 `json:"num_workers,omitempty"` + AutoScale *AutoScale `json:"autoscale,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + CreatorUserName string `json:"creator_user_name,omitempty"` + Driver *SparkNode `json:"driver,omitempty"` + Executors []SparkNode `json:"executors,omitempty"` + SparkContextID int64 `json:"spark_context_id,omitempty"` + JdbcPort int32 `json:"jdbc_port,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + SparkVersion string `json:"spark_version,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + AwsAttributes *AwsAttributes `json:"aws_attributes,omitempty"` + NodeTypeID string `json:"node_type_id,omitempty"` + DriverNodeTypeID string `json:"driver_node_type_id,omitempty"` + SSHPublicKeys []string `json:"ssh_public_keys,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + ClusterLogConf *StorageInfo `json:"cluster_log_conf,omitempty"` + InitScripts []StorageInfo `json:"init_scripts,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + AutoterminationMinutes int32 `json:"autotermination_minutes,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + ClusterSource AwsAvailability `json:"cluster_source,omitempty"` + State ClusterState `json:"state,omitempty"` + StateMessage string `json:"state_message,omitempty"` + StartTime int64 `json:"start_time,omitempty"` + TerminateTime int64 `json:"terminate_time,omitempty"` + LastStateLossTime int64 `json:"last_state_loss_time,omitempty"` + LastActivityTime int64 `json:"last_activity_time,omitempty"` + ClusterMemoryMb int64 `json:"cluster_memory_mb,omitempty"` + ClusterCores float32 
`json:"cluster_cores,omitempty"` + DefaultTags map[string]string `json:"DefaultTags,omitempty"` + ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"` + TerminationReason *TerminationReason `json:"termination_reason,omitempty"` +} diff --git a/client/model/command.go b/client/model/command.go new file mode 100644 index 000000000..97ecfb212 --- /dev/null +++ b/client/model/command.go @@ -0,0 +1,15 @@ +package model + +type CommandResults struct { + ResultType string `json:"resultType,omitempty"` + Data interface{} `json:"data,omitempty"` + Schema interface{} `json:"schema,omitempty"` + Truncated bool `json:"truncated,omitempty"` + IsJsonSchema bool `json:"isJsonSchema,omitempty"` +} + +type Command struct { + ID string `json:"id,omitempty"` + Status string `json:"status,omitempty"` + Results *CommandResults `json:"results,omitempty"` +} diff --git a/client/model/group.go b/client/model/group.go new file mode 100644 index 000000000..ad967df52 --- /dev/null +++ b/client/model/group.go @@ -0,0 +1,24 @@ +package model + +type GroupMember struct { + Display string `json:"display,omitempty"` + Value string `json:"value,omitempty"` + Ref string `json:"$ref,omitempty"` +} + +type MemberListItem struct { + Value string `json:"value,omitempty"` +} + +type Group struct { + ID string `json:"id,omitempty"` + Schemas []URN `json:"schemas,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Members []GroupMember `json:"members,omitempty"` + Groups []interface{} `json:"groups,omitempty"` +} + +type GroupPatchRequest struct { + Schemas []URN `json:"schemas,omitempty"` + Operations []GroupPatchOperations `json:"Operations,omitempty"` +} diff --git a/client/model/instance_pool.go b/client/model/instance_pool.go new file mode 100644 index 000000000..4608f3516 --- /dev/null +++ b/client/model/instance_pool.go @@ -0,0 +1,62 @@ +package model + +type InstancePoolAwsAttributes struct { + Availability AwsAvailability `json:"availability,omitempty"` + ZoneId string `json:"zone_id,omitempty"` + SpotBidPricePercent int32 `json:"spot_bid_price_percent,omitempty"` +} + +type InstancePoolDiskType struct { + AzureDiskVolumeType string `json:"azure_disk_volume_type,omitempty"` + EbsVolumeType string `json:"ebs_volume_type,omitempty"` +} + +type InstancePoolDiskSpec struct { + DiskType *InstancePoolDiskType `json:"disk_type,omitempty"` + DiskCount int32 `json:"disk_count,omitempty"` + DiskSize int32 `json:"disk_size,omitempty"` +} + +type InstancePool struct { + InstancePoolName string `json:"instance_pool_name,omitempty"` + MinIdleInstances int32 `json:"min_idle_instances,omitempty"` + MaxCapacity int32 `json:"max_capacity,omitempty"` + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` + PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` +} + +type InstancePoolState string + +const ( + InstancePoolStateActive InstancePoolState = "ACTIVE" + InstancePoolStateDeleted InstancePoolState = "DELETED" +) + +type InstancePoolStats struct { + UsedCount int32 `json:"used_count,omitempty"` + IdleCount int32 `json:"idle_count,omitempty"` + PendingUsedCount int32 `json:"pending_used_count,omitempty"` + PendingIdleCount int32 
`json:"pending_idle_count,omitempty"` +} + +type InstancePoolInfo struct { + InstancePoolId string `json:"instance_pool_id,omitempty"` + InstancePoolName string `json:"instance_pool_name,omitempty"` + MinIdleInstances int32 `json:"min_idle_instances,omitempty"` + MaxCapacity int32 `json:"max_capacity,omitempty"` + AwsAttributes *InstancePoolAwsAttributes `json:"aws_attributes,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + DefaultTags map[string]string `json:"default_tags,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + IdleInstanceAutoTerminationMinutes int32 `json:"idle_instance_autotermination_minutes,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + DiskSpec *InstancePoolDiskSpec `json:"disk_spec,omitempty"` + PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` + State InstancePoolState `json:"state,omitempty"` + Stats *InstancePoolStats `json:"stats,omitempty"` +} diff --git a/client/model/scim.go b/client/model/scim.go new file mode 100644 index 000000000..869d91561 --- /dev/null +++ b/client/model/scim.go @@ -0,0 +1,20 @@ +package model + +type URN string + +const ( + UserSchema URN = "urn:ietf:params:scim:schemas:core:2.0:User" + WorkspaceUserSchema URN = "urn:ietf:params:scim:schemas:extension:workspace:2.0:User" + PatchOp URN = "urn:ietf:params:scim:api:messages:2.0:PatchOp" + GroupSchema URN = "urn:ietf:params:scim:schemas:core:2.0:Group" +) + +type MembersValue struct { + Members []MemberListItem `json:"members,omitempty"` +} + +type GroupPatchOperations struct { + Op string `json:"op,omitempty"` + Path string `json:"path,omitempty"` + Value *MembersValue `json:"value,omitempty"` +} diff --git a/client/model/secret.go b/client/model/secret.go new file mode 100644 index 000000000..b4207005a --- /dev/null +++ b/client/model/secret.go @@ -0,0 +1,34 @@ +package model + +type ScopeBackendType string + +const ( + ScopeBackendTypeDatabricks ScopeBackendType = "DATABRICKS" +) + +type SecretScope struct { + Name string `json:"name,omitempty"` + BackendType ScopeBackendType `json:"backend_type,omitempty"` +} + +type SecretMetadata struct { + Key string `json:"key,omitempty"` + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` +} + +type AclPermission string + +const ( + AclPermissionRead AclPermission = "READ" + AclPermissionWrite AclPermission = "WRITE" + AclPermissionManage AclPermission = "MANAGE" +) + +func ValidSecretAclPermissions() []AclPermission { + return []AclPermission{AclPermissionManage, AclPermissionRead, AclPermissionWrite} +} + +type AclItem struct { + Principal string `json:"principal,omitempty"` + Permission AclPermission `json:"permission,omitempty"` +} diff --git a/client/model/token.go b/client/model/token.go new file mode 100644 index 000000000..188d40f3d --- /dev/null +++ b/client/model/token.go @@ -0,0 +1,26 @@ +package model + +//go:generate easytags $GOFILE + +type TokenResponse struct { + TokenValue string `json:"token_value,omitempty"` + TokenInfo *TokenInfo `json:"token_info,omitempty"` +} + +type TokenInfo struct { + TokenID string `json:"token_id,omitempty"` + CreationTime int64 `json:"creation_time,omitempty"` + ExpiryTime int64 `json:"expiry_time,omitempty"` + Comment string `json:"comment,omitempty"` +} + +// +//{ +//"token_value":"dapideadbeefdeadbeefdeadbeefdeadbeef", +//"token_info": { +//"token_id":"5715498424f15ee0213be729257b53fc35a47d5953e3bdfd8ed22a0b93b339f4", +//"creation_time":1513120516294, +//"expiry_time":1513120616294, 
+//"comment":"this is an example token" +//} +//} diff --git a/client/model/user.go b/client/model/user.go new file mode 100644 index 000000000..8a2b13da7 --- /dev/null +++ b/client/model/user.go @@ -0,0 +1,39 @@ +package model + +type Entitlement string + +const ( + AllowClusterCreateEntitlement Entitlement = "allow-cluster-create" + AllowInstancePoolCreateEntitlement Entitlement = "allow-instance-pool-create" +) + +type GroupsListItem struct { + Value string `json:"value,omitempty"` +} + +type EntitlementsListItem struct { + Value Entitlement `json:"value,omitempty"` +} + +type RoleListItem struct { + Value string `json:"value,omitempty"` +} + +type Email struct { + Type interface{} `json:"type,omitempty"` + Value string `json:"value,omitempty"` + Primary interface{} `json:"primary,omitempty"` +} + +type User struct { + ID string `json:"id,omitempty"` + Emails []Email `json:"emails,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Active bool `json:"active,omitempty"` + Schemas []URN `json:"schemas,omitempty"` + UserName string `json:"userName,omitempty"` + Groups []GroupsListItem `json:"groups,omitempty"` + Name map[string]string `json:"name,omitempty"` + Roles []RoleListItem `json:"roles,omitempty"` + Entitlements []EntitlementsListItem `json:"entitlements,omitempty"` +} diff --git a/client/service/api.go b/client/service/api.go new file mode 100644 index 000000000..2483cf0a0 --- /dev/null +++ b/client/service/api.go @@ -0,0 +1,65 @@ +package service + +import ( + db "github.com/stikkireddy/databricks-tf-provider/client" +) + +var scimHeaders = map[string]string{ + "Content-Type": "application/scim+json", +} + +type DBApiClient struct { + Option db.DBClientOption +} + +// Init initializes the client +func (c *DBApiClient) Init(option db.DBClientOption) DBApiClient { + c.Option = option + option.Init() + return *c +} + +// Clusters returns an instance of ClustersAPI +func (c DBApiClient) Clusters() ClustersAPI { + var clustersAPI ClustersAPI + return clustersAPI.init(c) +} + +func (c DBApiClient) Secrets() SecretsAPI { + var secretsAPI SecretsAPI + return secretsAPI.init(c) +} + +func (c DBApiClient) SecretScopes() SecretScopesAPI { + var secretScopesAPI SecretScopesAPI + return secretScopesAPI.init(c) +} + +func (c DBApiClient) SecretAcls() SecretAclsAPI { + var secretAclsAPI SecretAclsAPI + return secretAclsAPI.init(c) +} + +func (c DBApiClient) Tokens() TokensAPI { + var tokensAPI TokensAPI + return tokensAPI.init(c) +} + +func (c DBApiClient) Users() UsersAPI { + var usersAPI UsersAPI + return usersAPI.init(c) +} + +func (c DBApiClient) Groups() GroupsAPI { + var groupsAPI GroupsAPI + return groupsAPI.init(c) +} + +func (c DBApiClient) InstancePools() InstancePoolsAPI { + var instancePoolsAPI InstancePoolsAPI + return instancePoolsAPI.init(c) +} + +func (c *DBApiClient) performQuery(method, path string, apiVersion string, headers map[string]string, data interface{}) ([]byte, error) { + return db.PerformQuery(c.Option, method, path, "2.0", headers, true, false, data) +} diff --git a/client/service/clusters.go b/client/service/clusters.go new file mode 100644 index 000000000..f0be61037 --- /dev/null +++ b/client/service/clusters.go @@ -0,0 +1,260 @@ +package service + +import ( + "encoding/json" + "errors" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "log" + "net/http" + "time" +) + +type ClustersAPI struct { + Client DBApiClient +} + +func (a ClustersAPI) init(client DBApiClient) ClustersAPI { + a.Client = client + return a +} + +// Create creates 
a new Spark cluster +func (a ClustersAPI) Create(cluster model.Cluster) (model.ClusterInfo, error) { + var clusterInfo model.ClusterInfo + + resp, err := a.Client.performQuery(http.MethodPost, "/clusters/create", "2.0", nil, cluster) + if err != nil { + return clusterInfo, err + } + + err = json.Unmarshal(resp, &clusterInfo) + return clusterInfo, err +} + +// Update edits the configuration of a cluster to match the provided attributes and size +func (a ClustersAPI) Edit(clusterInfo model.ClusterInfo) error { + _, err := a.Client.performQuery(http.MethodPost, "/clusters/edit", "2.0", nil, clusterInfo) + return err +} + +// Start starts a terminated Spark cluster given its ID +func (a ClustersAPI) Start(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/start", "2.0", nil, data) + return err +} + +// Restart restart a Spark cluster given its ID. If the cluster is not in a RUNNING state, nothing will happen. +func (a ClustersAPI) Restart(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/restart", "2.0", nil, data) + return err +} + +func (a ClustersAPI) WaitForClusterRunning(clusterID string, sleepDurationSeconds time.Duration, timeoutDurationMinutes time.Duration) error { + errChan := make(chan error, 1) + go func() { + for { + clusterInfo, err := a.Get(clusterID) + if err != nil { + errChan <- err + } + if clusterInfo.State == model.ClusterStateRunning { + errChan <- nil + } else if model.ContainsClusterState(model.ClusterStateNonRunnable, clusterInfo.State) { + errChan <- errors.New("Cluster is in a non runnable state will not be able to transition to running, needs " + + "to be started again. Current state: " + string(clusterInfo.State)) + } + log.Println("Waiting for cluster to go to running, current state is: " + string(clusterInfo.State)) + time.Sleep(sleepDurationSeconds * time.Second) + } + }() + select { + case err := <-errChan: + return err + case <-time.After(timeoutDurationMinutes * time.Minute): + defer a.Start(clusterID) + return errors.New("Timed out cluster has not reached running state") + } +} + +func (a ClustersAPI) WaitForClusterTerminated(clusterID string, sleepDurationSeconds time.Duration, timeoutDurationMinutes time.Duration) error { + errChan := make(chan error, 1) + go func() { + for { + clusterInfo, err := a.Get(clusterID) + if err != nil { + errChan <- err + } + if clusterInfo.State == model.ClusterStateTerminated { + errChan <- nil + } else if model.ContainsClusterState(model.ClusterStateNonTerminating, clusterInfo.State) { + errChan <- errors.New("Cluster is in a non runnable state will not be able to transition to terminated, needs " + + "to be terminated again. Current state: " + string(clusterInfo.State)) + } + log.Println("Waiting for cluster to go to terminate, current state is: " + string(clusterInfo.State)) + time.Sleep(sleepDurationSeconds * time.Second) + } + }() + select { + case err := <-errChan: + return err + case <-time.After(timeoutDurationMinutes * time.Minute): + defer a.Delete(clusterID) + return errors.New("Timed out cluster has not reached terminated state") + } +} + +//// Resize resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a RUNNING state. 
+//func (a ClustersAPI) Resize(clusterID string, clusterSize models.ClusterSize) error { +// data := struct { +// ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` +// models.ClusterSize +// }{ +// clusterID, +// clusterSize, +// } +// _, err := a.Client.performQuery(http.MethodPost, "/clusters/resize", data, nil) +// return err +//} + +// Terminate terminates a Spark cluster given its ID +func (a ClustersAPI) Terminate(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/delete", "2.0", nil, data) + return err +} + +// Delete is an alias of Terminate +func (a ClustersAPI) Delete(clusterID string) error { + return a.Terminate(clusterID) +} + +// PermanentDelete permanently delete a cluster +func (a ClustersAPI) PermanentDelete(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/permanent-delete", "2.0", nil, data) + return err +} + +// Read retrieves the information for a cluster given its identifier +func (a ClustersAPI) Get(clusterID string) (model.ClusterInfo, error) { + var clusterInfo model.ClusterInfo + + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + resp, err := a.Client.performQuery(http.MethodGet, "/clusters/get", "2.0", nil, data) + if err != nil { + return clusterInfo, err + } + + err = json.Unmarshal(resp, &clusterInfo) + return clusterInfo, err +} + +// Pin ensure that an interactive cluster configuration is retained even after a cluster has been terminated for more than 30 days +func (a ClustersAPI) Pin(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/pin", "2.0", nil, data) + return err +} + +// Unpin allows the cluster to eventually be removed from the list returned by the List API +func (a ClustersAPI) Unpin(clusterID string) error { + data := struct { + ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"` + }{ + clusterID, + } + _, err := a.Client.performQuery(http.MethodPost, "/clusters/unpin", "2.0", nil, data) + return err +} + +// List return information about all pinned clusters, currently active clusters, +// up to 70 of the most recently terminated interactive clusters in the past 30 days, +// and up to 30 of the most recently terminated job clusters in the past 30 days +func (a ClustersAPI) List() ([]model.ClusterInfo, error) { + var clusterList = struct { + Clusters []model.ClusterInfo `json:"clusters,omitempty" url:"clusters,omitempty"` + }{} + + resp, err := a.Client.performQuery(http.MethodGet, "/clusters/list", "2.0", nil, nil) + if err != nil { + return clusterList.Clusters, err + } + + err = json.Unmarshal(resp, &clusterList) + return clusterList.Clusters, err +} + +// ListNodeTypes returns a list of supported Spark node types +func (a ClustersAPI) ListNodeTypes() ([]model.NodeType, error) { + var nodeTypeList = struct { + NodeTypes []model.NodeType `json:"node_types,omitempty" url:"node_types,omitempty"` + }{} + + resp, err := a.Client.performQuery(http.MethodGet, "/clusters/list-node-types", "2.0", nil, nil) + if err != nil { + return nodeTypeList.NodeTypes, err + } + + err = 
json.Unmarshal(resp, &nodeTypeList) + return nodeTypeList.NodeTypes, err +} + +//// SparkVersions return the list of available Spark versions +//func (a ClustersAPI) SparkVersions() ([]model.SparkVersion, error) { +// var versionsList = struct { +// Versions []models.SparkVersion `json:"versions,omitempty" url:"versions,omitempty"` +// }{} +// +// resp, err := a.Client.performQuery(http.MethodGet, "/clusters/spark-versions", nil, nil) +// if err != nil { +// return versionsList.Versions, err +// } +// +// err = json.Unmarshal(resp, &versionsList) +// return versionsList.Versions, err +//} + +// ClustersListZonesResponse is the response from ListZones +//type ClustersListZonesResponse struct { +// Zones []string `json:"zones,omitempty" url:"zones,omitempty"` +// DefaultZone string `json:"default_zone,omitempty" url:"default_zone,omitempty"` +//} +// +//// ListZones returns a list of availability zones where clusters can be created in (ex: us-west-2a) +//func (a ClustersAPI) ListZones() (ClustersListZonesResponse, error) { +// var zonesList ClustersListZonesResponse +// +// resp, err := a.Client.performQuery(http.MethodGet, "/clusters/list-zones", nil, nil) +// if err != nil { +// return zonesList, err +// } +// +// err = json.Unmarshal(resp, &zonesList) +// return zonesList, err +//} diff --git a/client/service/clusters_integration_test.go b/client/service/clusters_integration_test.go new file mode 100644 index 000000000..d03796a5a --- /dev/null +++ b/client/service/clusters_integration_test.go @@ -0,0 +1,55 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestListClustersIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + + client := GetIntegrationDBAPIClient() + + cluster := model.Cluster{ + NumWorkers: 1, + ClusterName: "my-cluster", + SparkEnvVars: map[string]string{ + "PYSPARK_PYTHON": "/databricks/python3/bin/python3", + }, + SparkVersion: "6.2.x-scala2.11", + NodeTypeID: GetCloudInstanceType(client), + DriverNodeTypeID: GetCloudInstanceType(client), + IdempotencyToken: "my-cluster", + AutoterminationMinutes: 20, + } + + clusterInfo, err := client.Clusters().Create(cluster) + assert.NoError(t, err, err) + + clusterReadInfo, err := client.Clusters().Get(clusterInfo.ClusterID) + assert.NoError(t, err, err) + + defer func() { + err = client.Clusters().WaitForClusterTerminated(clusterReadInfo.ClusterID, 10, 20) + assert.NoError(t, err, err) + + err = client.Clusters().Unpin(clusterReadInfo.ClusterID) + assert.NoError(t, err, err) + + err = client.Clusters().PermanentDelete(clusterReadInfo.ClusterID) + assert.NoError(t, err, err) + }() + + err = client.Clusters().Pin(clusterReadInfo.ClusterID) + assert.NoError(t, err, err) + + err = client.Clusters().WaitForClusterRunning(clusterReadInfo.ClusterID, 10, 20) + assert.NoError(t, err, err) + + err = client.Clusters().Delete(clusterReadInfo.ClusterID) + assert.NoError(t, err, err) + +} diff --git a/client/service/groups.go b/client/service/groups.go new file mode 100644 index 000000000..c61935260 --- /dev/null +++ b/client/service/groups.go @@ -0,0 +1,105 @@ +package service + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// TokensAPI exposes the Secrets API +type GroupsAPI struct { + Client DBApiClient +} + +func (a GroupsAPI) init(client DBApiClient) GroupsAPI { + a.Client = client + return a +} + 
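+// Create creates a new SCIM group with the given display name and, optionally, an
+// initial set of members identified by their SCIM user IDs. A minimal, illustrative
+// usage, assuming an already configured DBApiClient named client and an existing user:
+//
+//	group, err := client.Groups().Create("data-engineers", []string{user.ID})
+//	if err != nil {
+//		// handle the error
+//	}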
+func (a GroupsAPI) Create(groupName string, members []string) (model.Group, error) { + var group model.Group + + scimGroupRequest := struct { + Schemas []model.URN `json:"schemas,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Members []model.MemberListItem `json:"members,omitempty"` + }{} + scimGroupRequest.Schemas = []model.URN{model.GroupSchema} + scimGroupRequest.DisplayName = groupName + + scimGroupRequest.Members = []model.MemberListItem{} + for _, member := range members { + scimGroupRequest.Members = append(scimGroupRequest.Members, model.MemberListItem{Value: member}) + } + + resp, err := a.Client.performQuery(http.MethodPost, "/preview/scim/v2/Groups", "2.0", scimHeaders, scimGroupRequest) + if err != nil { + return group, err + } + + err = json.Unmarshal(resp, &group) + return group, err +} + +func (a GroupsAPI) Read(groupID string) (model.Group, error) { + var group model.Group + groupPath := fmt.Sprintf("/preview/scim/v2/Groups/%v", groupID) + + resp, err := a.Client.performQuery(http.MethodGet, groupPath, "2.0", scimHeaders, nil) + if err != nil { + return group, err + } + + err = json.Unmarshal(resp, &group) + return group, err +} + +func (a GroupsAPI) Update(groupID string, addMembers []string, removeMembers []string) error { + groupPath := fmt.Sprintf("/preview/scim/v2/Groups/%v", groupID) + + var addOperations model.GroupPatchOperations + var removeOperations model.GroupPatchOperations + + groupPatchRequest := model.GroupPatchRequest{ + Schemas: []model.URN{model.PatchOp}, + Operations: []model.GroupPatchOperations{}, + } + + if addMembers == nil && removeMembers == nil { + return errors.New("Empty members list to add or to remove.") + } + + if len(addMembers) > 0 { + addOperations = model.GroupPatchOperations{ + Op: "add", + Value: &model.MembersValue{ + Members: []model.MemberListItem{}, + }, + } + for _, addMember := range addMembers { + addOperations.Value.Members = append(addOperations.Value.Members, model.MemberListItem{Value: addMember}) + } + groupPatchRequest.Operations = append(groupPatchRequest.Operations, addOperations) + } + + for _, removeMember := range removeMembers { + path := fmt.Sprintf("members[value eq \"%s\"]", removeMember) + removeOperations = model.GroupPatchOperations{ + Op: "remove", + Path: path, + } + groupPatchRequest.Operations = append(groupPatchRequest.Operations, removeOperations) + } + + _, err := a.Client.performQuery(http.MethodPatch, groupPath, "2.0", scimHeaders, groupPatchRequest) + + return err +} + +func (a GroupsAPI) Delete(groupID string) error { + groupPath := fmt.Sprintf("/preview/scim/v2/Groups/%v", groupID) + _, err := a.Client.performQuery(http.MethodDelete, groupPath, "2.0", scimHeaders, nil) + return err +} diff --git a/client/service/groups_integration_test.go b/client/service/groups_integration_test.go new file mode 100644 index 000000000..ba88375b7 --- /dev/null +++ b/client/service/groups_integration_test.go @@ -0,0 +1,50 @@ +package service + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestFmt(t *testing.T) { + str := fmt.Sprintf("members[value eq \"%s\"]", "1000") + t.Log(str) +} + +func TestGroup(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + client := GetIntegrationDBAPIClient() + + user, err := client.Users().Create("test-acc@databricks.com", "test account", nil, nil) + assert.NoError(t, err, err) + + user2, err := client.Users().Create("test-acc2@databricks.com", "test account", nil, nil) + assert.NoError(t, 
err, err) + + //Create empty group + group, err := client.Groups().Create("my-test-group", nil) + assert.NoError(t, err, err) + + defer func() { + err := client.Groups().Delete(group.ID) + assert.NoError(t, err, err) + err = client.Users().Delete(user.ID) + assert.NoError(t, err, err) + err = client.Users().Delete(user2.ID) + assert.NoError(t, err, err) + }() + + group, err = client.Groups().Read(group.ID) + //t.Log(group.ID) + err = client.Groups().Update(group.ID, []string{user.ID, user2.ID}, nil) + assert.NoError(t, err, err) + + err = client.Groups().Update(group.ID, nil, []string{user.ID}) + assert.NoError(t, err, err) + + group, err = client.Groups().Read(group.ID) + assert.True(t, len(group.Members) == 1) + assert.True(t, group.Members[0].Value == user2.ID) +} diff --git a/client/service/instance_pools.go b/client/service/instance_pools.go new file mode 100644 index 000000000..e44afe377 --- /dev/null +++ b/client/service/instance_pools.go @@ -0,0 +1,65 @@ +package service + +import ( + "encoding/json" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "log" + "net/http" +) + +type InstancePoolsAPI struct { + Client DBApiClient +} + +func (a InstancePoolsAPI) init(client DBApiClient) InstancePoolsAPI { + a.Client = client + return a +} + +// Create creates the instance pool to given the instance pool configuration +func (a InstancePoolsAPI) Create(instancePool model.InstancePool) (model.InstancePoolInfo, error) { + var instancePoolInfo model.InstancePoolInfo + + resp, err := a.Client.performQuery(http.MethodPost, "/instance-pools/create", "2.0", nil, instancePool) + if err != nil { + return instancePoolInfo, err + } + log.Println(resp) + err = json.Unmarshal(resp, &instancePoolInfo) + return instancePoolInfo, err +} + +// Update edits the configuration of a instance pool to match the provided attributes and size +func (a InstancePoolsAPI) Update(instancePoolInfo model.InstancePoolInfo) error { + _, err := a.Client.performQuery(http.MethodPost, "/instance-pools/edit", "2.0", nil, instancePoolInfo) + return err +} + +// Read retrieves the information for a instance pool given its identifier +func (a InstancePoolsAPI) Read(instancePoolId string) (model.InstancePoolInfo, error) { + var instancePoolInfo model.InstancePoolInfo + + data := struct { + InstancePoolId string `json:"instance_pool_id,omitempty" url:"instance_pool_id,omitempty"` + }{ + instancePoolId, + } + resp, err := a.Client.performQuery(http.MethodGet, "/instance-pools/get", "2.0", nil, data) + if err != nil { + return instancePoolInfo, err + } + + err = json.Unmarshal(resp, &instancePoolInfo) + return instancePoolInfo, err +} + +// Terminate terminates a instance pool given its ID +func (a InstancePoolsAPI) Delete(instancePoolId string) error { + data := struct { + InstancePoolId string `json:"instance_pool_id,omitempty" url:"instance_pool_id,omitempty"` + }{ + instancePoolId, + } + _, err := a.Client.performQuery(http.MethodPost, "/instance-pools/delete", "2.0", nil, data) + return err +} diff --git a/client/service/instance_pools_integration_test.go b/client/service/instance_pools_integration_test.go new file mode 100644 index 000000000..2f9a3e147 --- /dev/null +++ b/client/service/instance_pools_integration_test.go @@ -0,0 +1,59 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestInstancePools(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + client := 
GetIntegrationDBAPIClient() + + pool := model.InstancePool{ + InstancePoolName: "my_instance_pool", + MinIdleInstances: 0, + MaxCapacity: 10, + NodeTypeId: GetCloudInstanceType(client), + IdleInstanceAutoTerminationMinutes: 20, + PreloadedSparkVersions: []string{ + "6.3.x-scala2.11", + }, + } + poolInfo, err := client.InstancePools().Create(pool) + assert.NoError(t, err, err) + + defer func() { + err := client.InstancePools().Delete(poolInfo.InstancePoolId) + assert.NoError(t, err, err) + }() + + poolReadInfo, err := client.InstancePools().Read(poolInfo.InstancePoolId) + assert.NoError(t, err, err) + assert.Equal(t, poolInfo.InstancePoolId, poolReadInfo.InstancePoolId) + assert.Equal(t, pool.InstancePoolName, poolReadInfo.InstancePoolName) + assert.Equal(t, pool.MinIdleInstances, poolReadInfo.MinIdleInstances) + assert.Equal(t, pool.MaxCapacity, poolReadInfo.MaxCapacity) + assert.Equal(t, pool.NodeTypeId, poolReadInfo.NodeTypeId) + assert.Equal(t, pool.IdleInstanceAutoTerminationMinutes, poolReadInfo.IdleInstanceAutoTerminationMinutes) + + err = client.InstancePools().Update(model.InstancePoolInfo{ + InstancePoolId: poolReadInfo.InstancePoolId, + InstancePoolName: "my_instance_pool", + MinIdleInstances: 0, + MaxCapacity: 20, + NodeTypeId: GetCloudInstanceType(client), + IdleInstanceAutoTerminationMinutes: 20, + PreloadedSparkVersions: []string{ + "6.3.x-scala2.11", + }, + }) + assert.NoError(t, err, err) + + poolReadInfo, err = client.InstancePools().Read(poolInfo.InstancePoolId) + assert.NoError(t, err, err) + assert.Equal(t, poolReadInfo.MaxCapacity, int32(20)) + +} diff --git a/client/service/instance_pools_test.go b/client/service/instance_pools_test.go new file mode 100644 index 000000000..fd50e978d --- /dev/null +++ b/client/service/instance_pools_test.go @@ -0,0 +1,230 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" + "testing" +) + +//go:generate easytags $GOFILE + +func TestInstancePoolsAPI_Create(t *testing.T) { + type args struct { + InstancePool *model.InstancePool `json:"instance_pool"` + } + tests := []struct { + name string + response string + args args + want model.InstancePoolInfo + wantErr bool + }{ + { + name: "Basic test", + response: `{ + "instance_pool_id": "0101-120000-brick1-pool-ABCD1234" + }`, + args: args{ + InstancePool: &model.InstancePool{ + InstancePoolName: "", + MinIdleInstances: 0, + MaxCapacity: 10, + NodeTypeId: "Standard_DS3_v2", + IdleInstanceAutoTerminationMinutes: 60, + EnableElasticDisk: false, + DiskSpec: &model.InstancePoolDiskSpec{ + DiskType: &model.InstancePoolDiskType{ + AzureDiskVolumeType: "", + }, + DiskCount: 1, + DiskSize: 10, + }, + }, + }, + want: model.InstancePoolInfo{ + InstancePoolId: "0101-120000-brick1-pool-ABCD1234", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input model.InstancePool + AssertRequestWithMockServer(t, tt.args.InstancePool, http.MethodPost, "/api/2.0/instance-pools/create", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.InstancePools().Create(*tt.args.InstancePool) + }) + }) + } +} + +func TestInstancePoolsAPI_Delete(t *testing.T) { + type args struct { + InstancePoolId string `json:"instance_pool_id"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + InstancePoolId: "0101-120000-brick1-pool-ABCD1234", + }, + wantErr: false, + }, + } + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/instance-pools/delete", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.InstancePools().Delete(tt.args.InstancePoolId) + }) + }) + } +} + +func TestInstancePoolsAPI_Update(t *testing.T) { + type args struct { + InstancePoolInfo *model.InstancePoolInfo `json:"instance_pool_info"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + InstancePoolInfo: &model.InstancePoolInfo{ + InstancePoolId: "0101-120000-brick1-pool-ABCD1234", + MinIdleInstances: 0, + MaxCapacity: 10, + NodeTypeId: "Standard_DS3_v2", + IdleInstanceAutoTerminationMinutes: 60, + EnableElasticDisk: false, + DiskSpec: &model.InstancePoolDiskSpec{ + DiskType: &model.InstancePoolDiskType{ + AzureDiskVolumeType: "", + }, + DiskCount: 1, + DiskSize: 10, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input model.InstancePoolInfo + AssertRequestWithMockServer(t, tt.args.InstancePoolInfo, http.MethodPost, "/api/2.0/instance-pools/edit", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.InstancePools().Update(*tt.args.InstancePoolInfo) + }) + }) + } +} + +func TestInstancePoolsAPI_Read(t *testing.T) { + type args struct { + InstancePoolId string `json:"instance_pool_id"` + } + tests := []struct { + name string + response string + args args + want model.InstancePoolInfo + wantErr bool + }{ + { + name: "Basic Test", + response: `{ + "instance_pool_name": "mypool", + "min_idle_instances": 0, + "aws_attributes": { + "availability": "SPOT", + "zone_id": "us-west-2a", + "spot_bid_price_percent": 100 + }, + "node_type_id": "c4.2xlarge", + "idle_instance_autotermination_minutes": 60, + "enable_elastic_disk": false, + "disk_spec": { + "disk_type": { + "ebs_volume_type": "GENERAL_PURPOSE_SSD" + }, + "disk_count": 1, + "disk_size": 100 + }, + "preloaded_spark_versions": [ + "5.4.x-scala2.11" + ], + "instance_pool_id": "101-120000-brick1-pool-ABCD1234", + "default_tags": { + "Vendor": "Databricks", + "DatabricksInstancePoolCreatorId": "100125", + "DatabricksInstancePoolId": "101-120000-brick1-pool-ABCD1234" + }, + "state": "ACTIVE", + "stats": { + "used_count": 10, + "idle_count": 5, + "pending_used_count": 5, + "pending_idle_count": 5 + }, + "status": {} + }`, + args: args{ + InstancePoolId: "101-120000-brick1-pool-ABCD1234", + }, + want: model.InstancePoolInfo{ + InstancePoolId: "101-120000-brick1-pool-ABCD1234", + InstancePoolName: "mypool", + MinIdleInstances: 0, + AwsAttributes: &model.InstancePoolAwsAttributes{ + Availability: model.AwsAvailabilitySpot, + ZoneId: "us-west-2a", + SpotBidPricePercent: 100, + }, + NodeTypeId: "c4.2xlarge", + IdleInstanceAutoTerminationMinutes: 60, + EnableElasticDisk: false, + DiskSpec: &model.InstancePoolDiskSpec{ + DiskType: &model.InstancePoolDiskType{ + EbsVolumeType: model.EbsVolumeTypeGeneralPurposeSsd, + }, + DiskCount: 1, + DiskSize: 100, + }, + DefaultTags: map[string]string{ + "Vendor": "Databricks", + "DatabricksInstancePoolCreatorId": "100125", + "DatabricksInstancePoolId": "101-120000-brick1-pool-ABCD1234", + }, + PreloadedSparkVersions: []string{ + "5.4.x-scala2.11", + }, + State: "ACTIVE", + Stats: &model.InstancePoolStats{ + UsedCount: 10, + IdleCount: 5, + PendingUsedCount: 5, + 
PendingIdleCount: 5, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input model.InstancePoolInfo + AssertRequestWithMockServer(t, &tt.args, http.MethodGet, "/api/2.0/instance-pools/get?instance_pool_id=101-120000-brick1-pool-ABCD1234", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.InstancePools().Read(tt.args.InstancePoolId) + }) + }) + } +} diff --git a/client/service/main_test.go b/client/service/main_test.go new file mode 100644 index 000000000..bbb353991 --- /dev/null +++ b/client/service/main_test.go @@ -0,0 +1,95 @@ +package service + +import ( + "encoding/json" + "github.com/joho/godotenv" + "github.com/r3labs/diff" + "github.com/stikkireddy/databricks-tf-provider/client" + "github.com/stretchr/testify/assert" + "log" + "net/http" + "net/http/httptest" + "os" + "reflect" + "strings" + "testing" +) + +func TestMain(m *testing.M) { + err := godotenv.Load("../../.env") + if err != nil { + log.Println("Failed to load environment") + } + code := m.Run() + os.Exit(code) +} + +func DeserializeJson(req *http.Request, m interface{}) error { + dec := json.NewDecoder(req.Body) + dec.DisallowUnknownFields() + + err := dec.Decode(&m) + return err +} + +func compare(t *testing.T, a interface{}, b interface{}) { + difference, err := diff.Diff(a, b) + assert.NoError(t, err, err) + jsonStr, _ := json.Marshal(difference) + assert.True(t, reflect.DeepEqual(a, b), string(jsonStr)) +} + +func GetIntegrationDBAPIClient() *DBApiClient { + var o client.DBClientOption + o.Token = os.Getenv("TOKEN") + o.Host = os.Getenv("HOST") + + var c DBApiClient + c.Init(o) + return &c +} + +func GetCloudInstanceType(c *DBApiClient) string { + if strings.Contains(c.Option.Host, "azure") { + return "Standard_DS3_v2" + } else { + return "i3.xlarge" + } +} + +func AssertRequestWithMockServer(t *testing.T, rawPayloadArgs interface{}, requestMethod string, requestURI string, input interface{}, response string, want interface{}, wantErr bool, apiCall func(client DBApiClient) (interface{}, error)) { + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + // Test request parameters + assert.Equal(t, req.Method, requestMethod) + assert.Equal(t, req.RequestURI, requestURI) + if requestMethod == http.MethodPost { + err := DeserializeJson(req, &input) + assert.NoError(t, err, err) + t.Log(input) + t.Log(rawPayloadArgs) + compare(t, rawPayloadArgs, input) + } + + _, err := rw.Write([]byte(response)) + assert.NoError(t, err, err) + + })) + // Close the server when test finishes + defer server.Close() + var o client.DBClientOption + o.Host = server.URL + + dbClient := DBApiClient{Option: o} + + output, err := apiCall(dbClient) + + if output != nil { + compare(t, want, output) + } + + if wantErr { + assert.Error(t, err, err) + } else { + assert.NoError(t, err, err) + } +} diff --git a/client/service/secret_acls.go b/client/service/secret_acls.go new file mode 100644 index 000000000..ce6e21720 --- /dev/null +++ b/client/service/secret_acls.go @@ -0,0 +1,85 @@ +package service + +import ( + "encoding/json" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// SecretsAPI exposes the Secrets API +type SecretAclsAPI struct { + Client DBApiClient +} + +func (a SecretAclsAPI) init(client DBApiClient) SecretAclsAPI { + a.Client = client + return a +} + +// Create creates or overwrites the ACL associated with the given principal (user or group) on the 
specified scope point +func (a SecretAclsAPI) Create(scope string, principal string, permission model.AclPermission) error { + data := struct { + Scope string `json:"scope,omitempty"` + Principal string `json:"principal,omitempty"` + Permission model.AclPermission `json:"permission,omitempty"` + }{ + scope, + principal, + permission, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/acls/put", "2.0", nil, data) + return err +} + +// Delete deletes the given ACL on the given scope +func (a SecretAclsAPI) Delete(scope string, principal string) error { + data := struct { + Scope string `json:"scope,omitempty"` + Principal string `json:"principal,omitempty"` + }{ + scope, + principal, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/acls/delete", "2.0", nil, data) + return err +} + +// Read describe the details about the given ACL, such as the group and permission +func (a SecretAclsAPI) Read(scope string, principal string) (model.AclItem, error) { + var aclItem model.AclItem + + data := struct { + Scope string `json:"scope,omitempty" url:"scope,omitempty"` + Principal string `json:"principal,omitempty" url:"principal,omitempty"` + }{ + scope, + principal, + } + resp, err := a.Client.performQuery(http.MethodGet, "/secrets/acls/get", "2.0", nil, data) + if err != nil { + return aclItem, err + } + + err = json.Unmarshal(resp, &aclItem) + return aclItem, err +} + +// List lists the ACLs set on the given scope +func (a SecretAclsAPI) List(scope string) ([]model.AclItem, error) { + var aclItem struct { + Items []model.AclItem `json:"items,omitempty"` + } + + data := struct { + Scope string `json:"scope,omitempty" url:"scope,omitempty"` + }{ + scope, + } + resp, err := a.Client.performQuery(http.MethodGet, "/secrets/acls/list", "2.0", nil, data) + if err != nil { + return aclItem.Items, err + } + + err = json.Unmarshal(resp, &aclItem) + return aclItem.Items, err +} diff --git a/client/service/secret_acls_test.go b/client/service/secret_acls_test.go new file mode 100644 index 000000000..b67e4f76e --- /dev/null +++ b/client/service/secret_acls_test.go @@ -0,0 +1,162 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" + "testing" +) + +//go:generate easytags $GOFILE + +func TestSecretAclsAPI_Create(t *testing.T) { + type args struct { + Scope string `json:"scope"` + Principal string `json:"principal"` + Permission model.AclPermission `json:"permission"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic Test", + response: "", + args: args{ + Scope: "my-scope", + Principal: "my-principal", + Permission: model.AclPermissionManage, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/acls/put", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.SecretAcls().Create(tt.args.Scope, tt.args.Principal, tt.args.Permission) + }) + }) + } +} + +func TestSecretAclsAPI_Delete(t *testing.T) { + type args struct { + Scope string `json:"scope"` + Principal string `json:"principal"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + Scope: "my-scope", + Principal: "my-principal", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input 
args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/acls/delete", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.SecretAcls().Delete(tt.args.Scope, tt.args.Principal) + }) + }) + } +} + +func TestSecretAclsAPI_List(t *testing.T) { + type args struct { + Scope string `json:"scope"` + } + tests := []struct { + name string + response string + args args + want []model.AclItem + wantErr bool + }{ + { + name: "Basic test", + response: `{ + "items": [ + { + "principal": "admins", + "permission": "MANAGE" + },{ + "principal": "data-scientists", + "permission": "READ" + } + ] + }`, + args: args{ + Scope: "my-scope", + }, + want: []model.AclItem{ + { + Principal: "admins", + Permission: model.AclPermissionManage, + }, + { + Principal: "data-scientists", + Permission: model.AclPermissionRead, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodGet, "/api/2.0/secrets/acls/list?scope=my-scope", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.SecretAcls().List(tt.args.Scope) + }) + }) + } +} + +func TestSecretAclsAPI_Read(t *testing.T) { + type args struct { + Scope string `json:"scope"` + Principal string `json:"principal"` + } + tests := []struct { + name string + response string + args args + want model.AclItem + wantErr bool + }{ + { + name: "Basic test", + response: `{ + "principal": "data-scientists", + "permission": "READ" + }`, + args: args{ + Scope: "my-scope", + Principal: "my-principal", + }, + want: model.AclItem{ + Principal: "data-scientists", + Permission: model.AclPermissionRead, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodGet, "/api/2.0/secrets/acls/get?principal=my-principal&scope=my-scope", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.SecretAcls().Read(tt.args.Scope, tt.args.Principal) + }) + }) + } +} diff --git a/client/service/secret_scopes.go b/client/service/secret_scopes.go new file mode 100644 index 000000000..5d0619ba5 --- /dev/null +++ b/client/service/secret_scopes.go @@ -0,0 +1,71 @@ +package service + +import ( + "encoding/json" + "errors" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// SecretsAPI exposes the Secrets API +type SecretScopesAPI struct { + Client DBApiClient +} + +func (a SecretScopesAPI) init(client DBApiClient) SecretScopesAPI { + a.Client = client + return a +} + +// CreateSecretScope creates a new secret scope +func (a SecretScopesAPI) Create(scope string, initialManagePrincipal string) error { + data := struct { + Scope string `json:"scope,omitempty"` + InitialManagePrincipal string `json:"initial_manage_principal,omitempty"` + }{ + scope, + initialManagePrincipal, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/scopes/create", "2.0", nil, data) + return err +} + +// DeleteSecretScope deletes a secret scope +func (a SecretScopesAPI) Delete(scope string) error { + data := struct { + Scope string `json:"scope,omitempty" ` + }{ + scope, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/scopes/delete", "2.0", nil, data) + return err +} + +// List lists all secret scopes available in the workspace +func (a SecretScopesAPI) List() 
([]model.SecretScope, error) { + var listSecretScopesResponse struct { + Scopes []model.SecretScope `json:"scopes,omitempty"` + } + + resp, err := a.Client.performQuery(http.MethodGet, "/secrets/scopes/list", "2.0", nil, nil) + if err != nil { + return listSecretScopesResponse.Scopes, err + } + + err = json.Unmarshal(resp, &listSecretScopesResponse) + return listSecretScopesResponse.Scopes, err +} + +func (a SecretScopesAPI) Read(scopeName string) (model.SecretScope, error) { + var secretScope model.SecretScope + scopes, err := a.List() + if err != nil { + return secretScope, err + } + for _, scope := range scopes { + if scope.Name == scopeName { + return scope, nil + } + } + return secretScope, errors.New("No Secret Scope found with scope name " + scopeName) +} diff --git a/client/service/secret_scopes_test.go b/client/service/secret_scopes_test.go new file mode 100644 index 000000000..395eebdb9 --- /dev/null +++ b/client/service/secret_scopes_test.go @@ -0,0 +1,152 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" + "testing" +) + +//go:generate easytags $GOFILE + +func TestSecretScopesAPI_Create(t *testing.T) { + type args struct { + Scope string `json:"scope"` + InitialManagePrincipal string `json:"initial_manage_principal"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + Scope: "my-scope", + InitialManagePrincipal: "users", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/scopes/create", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.SecretScopes().Create(tt.args.Scope, tt.args.InitialManagePrincipal) + }) + }) + } +} + +func TestSecretScopesAPI_Delete(t *testing.T) { + type args struct { + Scope string `json:"scope"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + Scope: "my-scope", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/scopes/delete", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.SecretScopes().Delete(tt.args.Scope) + }) + }) + } +} + +func TestSecretScopesAPI_Read(t *testing.T) { + type args struct { + ScopeName string `json:"scope_name"` + } + tests := []struct { + name string + response string + args args + want model.SecretScope + wantErr bool + }{ + { + name: "Basic test", + response: `{ + "scopes": [{ + "name": "my-databricks-scope", + "backend_type": "DATABRICKS" + },{ + "name": "mount-points", + "backend_type": "DATABRICKS" + }] + }`, + args: args{ + ScopeName: "my-databricks-scope", + }, + want: model.SecretScope{ + Name: "my-databricks-scope", + BackendType: model.ScopeBackendTypeDatabricks, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodGet, "/api/2.0/secrets/scopes/list?", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.SecretScopes().Read(tt.args.ScopeName) + }) + }) + } +} + +func TestSecretScopesAPI_List(t *testing.T) { + + 
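+	// Illustrative usage sketch (not exercised by this test): against a live
+	// workspace the secret-scope helpers above compose roughly as follows;
+	// the scope name and initial principal are placeholders.
+	//
+	//	c := GetIntegrationDBAPIClient()
+	//	_ = c.SecretScopes().Create("demo-scope", "users")
+	//	scope, _ := c.SecretScopes().Read("demo-scope") // Read lists all scopes and filters by name
+	//	_ = c.SecretScopes().Delete(scope.Name)
+	//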
tests := []struct { + name string + response string + want []model.SecretScope + wantErr bool + }{ + { + name: "Basic test", + response: `{ + "scopes": [{ + "name": "my-databricks-scope", + "backend_type": "DATABRICKS" + },{ + "name": "mount-points", + "backend_type": "DATABRICKS" + }] + }`, + want: []model.SecretScope{ + { + Name: "my-databricks-scope", + BackendType: model.ScopeBackendTypeDatabricks, + }, + { + Name: "mount-points", + BackendType: model.ScopeBackendTypeDatabricks, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AssertRequestWithMockServer(t, nil, http.MethodGet, "/api/2.0/secrets/scopes/list?", nil, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.SecretScopes().List() + }) + }) + } +} diff --git a/client/service/secrets.go b/client/service/secrets.go new file mode 100644 index 000000000..eeff554fd --- /dev/null +++ b/client/service/secrets.go @@ -0,0 +1,81 @@ +package service + +import ( + "encoding/json" + "errors" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// SecretsAPI exposes the Secrets API +type SecretsAPI struct { + Client DBApiClient +} + +func (a SecretsAPI) init(client DBApiClient) SecretsAPI { + a.Client = client + return a +} + +// PutSecretString creates or modifies a string secret depends on the type of scope backend +func (a SecretsAPI) Create(stringValue, scope, key string) error { + data := struct { + StringValue string `json:"string_value,omitempty"` + Scope string `json:"scope,omitempty"` + Key string `json:"key,omitempty"` + }{ + stringValue, + scope, + key, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/put", "2.0", nil, data) + return err +} + +// DeleteSecret deletes a secret depends on the type of scope backend +func (a SecretsAPI) Delete(scope, key string) error { + data := struct { + Scope string `json:"scope,omitempty"` + Key string `json:"key,omitempty"` + }{ + scope, + key, + } + _, err := a.Client.performQuery(http.MethodPost, "/secrets/delete", "2.0", nil, data) + return err +} + +// ListSecrets lists the secret keys that are stored at this scope +func (a SecretsAPI) List(scope string) ([]model.SecretMetadata, error) { + var secretsList struct { + Secrets []model.SecretMetadata `json:"secrets,omitempty"` + } + + data := struct { + Scope string `json:"scope,omitempty" url:"scope,omitempty"` + }{ + scope, + } + + resp, err := a.Client.performQuery(http.MethodGet, "/secrets/list", "2.0", nil, data) + if err != nil { + return secretsList.Secrets, err + } + + err = json.Unmarshal(resp, &secretsList) + return secretsList.Secrets, err +} + +func (a SecretsAPI) Read(scope string, key string) (model.SecretMetadata, error) { + var secretMeta model.SecretMetadata + secrets, err := a.List(scope) + if err != nil { + return secretMeta, err + } + for _, secret := range secrets { + if secret.Key == key { + return secret, nil + } + } + return secretMeta, errors.New("No Secret Scope found with secret metadata scope name " + scope + " and key " + key) +} diff --git a/client/service/secrets_scopes_acls_integration_test.go b/client/service/secrets_scopes_acls_integration_test.go new file mode 100644 index 000000000..8991bf81f --- /dev/null +++ b/client/service/secrets_scopes_acls_integration_test.go @@ -0,0 +1,68 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func 
TestSecretsScopesAclsIntegration(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + client := GetIntegrationDBAPIClient() + + testScope := "my-test-scope" + testKey := "my-test-key" + testSecret := "my-test-secret" + initialManagePrincipal := "users" + //TODO: Please replace it with users api and get random user + testPrincipal := "simon.zamarin@databricks.com" + + err := client.SecretScopes().Create(testScope, initialManagePrincipal) + assert.NoError(t, err, err) + + defer func() { + // Deleting scope deletes everything else + err := client.SecretScopes().Delete(testScope) + assert.NoError(t, err, err) + }() + + scopes, err := client.SecretScopes().List() + assert.NoError(t, err, err) + assert.True(t, len(scopes) > 1, "Scopes are empty list") + + scope, err := client.SecretScopes().Read(testScope) + assert.NoError(t, err, err) + assert.Equal(t, testScope, scope.Name, "Scope lookup does not yield same scope") + + err = client.Secrets().Create(testSecret, testScope, testKey) + assert.NoError(t, err, err) + + secrets, err := client.Secrets().List(testScope) + assert.NoError(t, err, err) + assert.True(t, len(secrets) > 0, "Secrets are empty list") + + secret, err := client.Secrets().Read(testScope, testKey) + assert.NoError(t, err, err) + assert.Equal(t, testKey, secret.Key, "Secret lookup does not yield same key") + + err = client.SecretAcls().Create(testScope, testPrincipal, model.AclPermissionManage) + assert.NoError(t, err, err) + + secretAcls, err := client.SecretAcls().List(testScope) + assert.NoError(t, err, err) + assert.True(t, len(secretAcls) > 0, "Secrets acls are empty list") + + secretAcl, err := client.SecretAcls().Read(testScope, testPrincipal) + assert.NoError(t, err, err) + assert.Equal(t, testPrincipal, secretAcl.Principal, "Secret lookup does not yield same key") + assert.Equal(t, model.AclPermissionManage, secretAcl.Permission, "Secret lookup does not yield same key") + + err = client.Secrets().Delete(testScope, testKey) + assert.NoError(t, err, err) + + err = client.SecretAcls().Delete(testScope, testPrincipal) + assert.NoError(t, err, err) + +} diff --git a/client/service/secrets_test.go b/client/service/secrets_test.go new file mode 100644 index 000000000..c561d84a2 --- /dev/null +++ b/client/service/secrets_test.go @@ -0,0 +1,172 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" + "testing" +) + +//go:generate easytags $GOFILE + +func TestSecretsAPI_Create(t *testing.T) { + type args struct { + StringValue string `json:"string_value"` + Scope string `json:"scope"` + Key string `json:"key"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic Test", + response: "", + args: args{ + StringValue: "my-secret", + Scope: "my-scope", + Key: "my-key", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/put", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.Secrets().Create(tt.args.StringValue, tt.args.Scope, tt.args.Key) + }) + }) + } +} + +func TestSecretsAPI_Delete(t *testing.T) { + + type args struct { + Scope string `json:"scope"` + Key string `json:"key"` + } + tests := []struct { + name string + response string + args args + wantErr bool + }{ + { + name: "Basic test", + response: "", + args: args{ + Scope: "my-scope", 
+ Key: "my-key", + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, &tt.args, http.MethodPost, "/api/2.0/secrets/delete", &input, tt.response, nil, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return nil, client.Secrets().Delete(tt.args.Scope, tt.args.Key) + }) + }) + } +} + +func TestSecretsAPI_ListSecrets(t *testing.T) { + type args struct { + Scope string `json:"scope"` + } + tests := []struct { + name string + args args + response string + want []model.SecretMetadata + wantErr bool + }{ + { + name: "Basic Test", + args: args{ + Scope: "my-scope", + }, + response: `{ + "secrets": [ + { + "key": "my-string-key", + "last_updated_timestamp": 1520467595000 + }, + { + "key": "my-byte-key", + "last_updated_timestamp": 1520467595000 + } + ] + }`, + want: []model.SecretMetadata{ + { + Key: "my-string-key", + LastUpdatedTimestamp: 1520467595000, + }, + { + Key: "my-byte-key", + LastUpdatedTimestamp: 1520467595000, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, tt.args, http.MethodGet, "/api/2.0/secrets/list?scope=my-scope", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.Secrets().List(tt.args.Scope) + }) + }) + } +} + +func TestSecretsAPI_Read(t *testing.T) { + type args struct { + Scope string `json:"scope"` + Key string `json:"key"` + } + tests := []struct { + name string + args args + response string + want model.SecretMetadata + wantErr bool + }{ + { + name: "Basic test", + args: args{ + Scope: "my-scope", + Key: "my-string-key", + }, + response: `{ + "secrets": [ + { + "key": "my-string-key", + "last_updated_timestamp": 1520467595000 + }, + { + "key": "my-byte-key", + "last_updated_timestamp": 1520467595000 + } + ] + }`, + want: model.SecretMetadata{ + Key: "my-string-key", + LastUpdatedTimestamp: 1520467595000, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var input args + AssertRequestWithMockServer(t, tt.args, http.MethodGet, "/api/2.0/secrets/list?scope=my-scope", &input, tt.response, tt.want, tt.wantErr, func(client DBApiClient) (interface{}, error) { + return client.Secrets().Read(tt.args.Scope, tt.args.Key) + }) + }) + } +} diff --git a/client/service/tokens.go b/client/service/tokens.go new file mode 100644 index 000000000..3630fe0e7 --- /dev/null +++ b/client/service/tokens.go @@ -0,0 +1,74 @@ +package service + +import ( + "encoding/json" + "errors" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// TokensAPI exposes the Secrets API +type TokensAPI struct { + Client DBApiClient +} + +func (a TokensAPI) init(client DBApiClient) TokensAPI { + a.Client = client + return a +} + +func (a TokensAPI) Create(lifeTimeSeconds int32, comment string) (model.TokenResponse, error) { + var tokenData model.TokenResponse + + tokenCreateRequest := struct { + LifetimeSeconds int32 `json:"lifetime_seconds,omitempty"` + Comment string `json:"comment,omitempty"` + }{ + lifeTimeSeconds, + comment, + } + + tokenCreateResponse, err := a.Client.performQuery(http.MethodPost, "/token/create", "2.0", nil, tokenCreateRequest) + if err != nil { + return tokenData, err + } + + err = json.Unmarshal(tokenCreateResponse, &tokenData) + return tokenData, err +} + +func (a TokensAPI) List() ([]model.TokenInfo, error) { + var tokenListResult struct { + 
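+		// The 2.0 token/list endpoint wraps its results in a top-level object,
+		// roughly (sketch only, contents abbreviated):
+		//
+		//	{"token_infos": [{"token_id": "...", "comment": "..."}]}
+		//
+		// hence the anonymous wrapper struct used for decoding below.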
TokenInfos []model.TokenInfo `json:"token_infos,omitempty"` + } + tokenListResponse, err := a.Client.performQuery(http.MethodGet, "/token/list", "2.0", nil, tokenListResult) + if err != nil { + return tokenListResult.TokenInfos, err + } + err = json.Unmarshal(tokenListResponse, &tokenListResult) + return tokenListResult.TokenInfos, err +} + +func (a TokensAPI) Read(tokenID string) (model.TokenInfo, error) { + var tokenInfo model.TokenInfo + tokenList, err := a.List() + if err != nil { + return tokenInfo, err + } + for _, tokenInfoRecord := range tokenList { + if tokenInfoRecord.TokenID == tokenID { + return tokenInfoRecord, nil + } + } + return tokenInfo, errors.New("Unable to locate token: " + tokenID) +} + +func (a TokensAPI) Delete(tokenID string) error { + tokenDeleteRequest := struct { + TokenId string `json:"token_id,omitempty"` + }{ + tokenID, + } + _, err := a.Client.performQuery(http.MethodPost, "/token/delete", "2.0", nil, tokenDeleteRequest) + return err +} diff --git a/client/service/tokens_integration_test.go b/client/service/tokens_integration_test.go new file mode 100644 index 000000000..7ec1da1b9 --- /dev/null +++ b/client/service/tokens_integration_test.go @@ -0,0 +1,33 @@ +package service + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCreateToken(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + + client := GetIntegrationDBAPIClient() + + lifeTimeSeconds := int32(30) + comment := "Hello world" + + token, err := client.Tokens().Create(lifeTimeSeconds, comment) + assert.NoError(t, err, err) + assert.True(t, len(token.TokenValue) > 0, "Token value is empty") + + defer func() { + err := client.Tokens().Delete(token.TokenInfo.TokenID) + assert.NoError(t, err, err) + }() + + _, err = client.Tokens().Read(token.TokenInfo.TokenID) + assert.NoError(t, err, err) + + tokenList, err := client.Tokens().List() + assert.True(t, len(tokenList) > 0, "Token list is empty") + +} diff --git a/client/service/users.go b/client/service/users.go new file mode 100644 index 000000000..619cd66cb --- /dev/null +++ b/client/service/users.go @@ -0,0 +1,93 @@ +package service + +import ( + "encoding/json" + "fmt" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "net/http" +) + +// TokensAPI exposes the Secrets API +type UsersAPI struct { + Client DBApiClient +} + +func (a UsersAPI) init(client DBApiClient) UsersAPI { + a.Client = client + return a +} + +func (a UsersAPI) Create(userName string, displayName string, entitlements []string, roles []string) (model.User, error) { + var user model.User + scimUserRequest := struct { + Schemas []model.URN `json:"schemas,omitempty"` + UserName string `json:"userName,omitempty"` + Entitlements []model.EntitlementsListItem `json:"entitlements,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Roles []model.RoleListItem `json:"roles,omitempty"` + }{} + scimUserRequest.Schemas = []model.URN{model.UserSchema} + scimUserRequest.UserName = userName + scimUserRequest.DisplayName = displayName + scimUserRequest.Entitlements = []model.EntitlementsListItem{} + for _, entitlement := range entitlements { + scimUserRequest.Entitlements = append(scimUserRequest.Entitlements, model.EntitlementsListItem{Value: model.Entitlement(entitlement)}) + } + scimUserRequest.Roles = []model.RoleListItem{} + for _, role := range roles { + scimUserRequest.Roles = append(scimUserRequest.Roles, model.RoleListItem{Value: role}) + } + + resp, err := a.Client.performQuery(http.MethodPost, 
"/preview/scim/v2/Users", "2.0", scimHeaders, scimUserRequest) + if err != nil { + return user, err + } + + err = json.Unmarshal(resp, &user) + return user, err +} + +func (a UsersAPI) Read(userId string) (model.User, error) { + var user model.User + userPath := fmt.Sprintf("/preview/scim/v2/Users/%v", userId) + + resp, err := a.Client.performQuery(http.MethodGet, userPath, "2.0", scimHeaders, nil) + if err != nil { + return user, err + } + + err = json.Unmarshal(resp, &user) + return user, err +} + +func (a UsersAPI) Update(userId string, userName string, displayName string, entitlements []string, roles []string) error { + userPath := fmt.Sprintf("/preview/scim/v2/Users/%v", userId) + scimUserUpdateRequest := struct { + Schemas []model.URN `json:"schemas,omitempty"` + UserName string `json:"userName,omitempty"` + Entitlements []model.EntitlementsListItem `json:"entitlements,omitempty"` + DisplayName string `json:"displayName,omitempty"` + Roles []model.RoleListItem `json:"roles,omitempty"` + }{} + scimUserUpdateRequest.Schemas = []model.URN{model.UserSchema} + scimUserUpdateRequest.UserName = userName + scimUserUpdateRequest.DisplayName = displayName + scimUserUpdateRequest.Entitlements = []model.EntitlementsListItem{} + for _, entitlement := range entitlements { + scimUserUpdateRequest.Entitlements = append(scimUserUpdateRequest.Entitlements, model.EntitlementsListItem{Value: model.Entitlement(entitlement)}) + } + scimUserUpdateRequest.Roles = []model.RoleListItem{} + for _, role := range roles { + scimUserUpdateRequest.Roles = append(scimUserUpdateRequest.Roles, model.RoleListItem{Value: role}) + } + _, err := a.Client.performQuery(http.MethodPut, userPath, "2.0", scimHeaders, scimUserUpdateRequest) + return err +} + +func (a UsersAPI) Delete(userId string) error { + + userPath := fmt.Sprintf("/preview/scim/v2/Users/%v", userId) + + _, err := a.Client.performQuery(http.MethodDelete, userPath, "2.0", scimHeaders, nil) + return err +} diff --git a/client/service/users_integration_test.go b/client/service/users_integration_test.go new file mode 100644 index 000000000..a0024df98 --- /dev/null +++ b/client/service/users_integration_test.go @@ -0,0 +1,33 @@ +package service + +import ( + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestCreateUser(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + + client := GetIntegrationDBAPIClient() + + user, err := client.Users().Create("testuser@databricks.com", "Display Name", nil, nil) + assert.NoError(t, err, err) + assert.True(t, len(user.ID) > 0, "User id is empty") + idToDelete := user.ID + defer func() { + err := client.Users().Delete(idToDelete) + assert.NoError(t, err, err) + }() + + user, err = client.Users().Read(user.ID) + t.Log(user) + assert.NoError(t, err, err) + + err = client.Users().Update(user.ID, "newtestuser@databricks.com", "Test User", []string{string(model.AllowClusterCreateEntitlement)}, nil) + t.Log(user) + assert.NoError(t, err, err) + +} diff --git a/client/version.go b/client/version.go new file mode 100644 index 000000000..df7d08c79 --- /dev/null +++ b/client/version.go @@ -0,0 +1,12 @@ +package client + +type APIVersion string + +const ( + // APIVersion is the version of the RESTful API of DataBricks + API2 APIVersion = "2.0" + API12 APIVersion = "1.2" + + // SdkVersion is the version of this library + SdkVersion = "0.0.1" +) diff --git a/db/azure_ws_init.go b/db/azure_ws_init.go new file mode 100644 
index 000000000..e4fde87ed --- /dev/null +++ b/db/azure_ws_init.go @@ -0,0 +1,179 @@ +package db + +import ( + "encoding/json" + "github.com/stikkireddy/databricks-tf-provider/client" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "log" + "net/http" +) + +const ( + ManagementResourceEndpoint string = "https://management.core.windows.net/" + ADBResourceID string = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d" +) + +type AzureAuth struct { + TokenPayload *TokenPayload + ManagementToken string + AdbWorkspaceResourceID string + AdbAccessToken string + AdbPlatformToken string +} + +type TokenPayload struct { + ManagedResourceGroup string + AzureRegion string + WorkspaceName string + ResourceGroup string + SubscriptionId string + ClientSecret string + ClientID string + TenantID string +} + +type WsProps struct { + ManagedResourceGroupID string `json:"managedResourceGroupId"` +} + +type WorkspaceRequest struct { + Properties *WsProps `json:"properties"` + Name string `json:"name"` + Location string `json:"location"` +} + +func (a *AzureAuth) getManagementToken(option client.DBClientOption) error { + log.Println("Creating Azure Databricks management OAuth token.") + url := "https://login.microsoftonline.com/" + a.TokenPayload.TenantID + "/oauth2/token" + payload := "grant_type=client_credentials&client_id=" + a.TokenPayload.ClientID + "&client_secret=" + a.TokenPayload.ClientSecret + "&resource=" + ManagementResourceEndpoint + headers := map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + "cache-control": "no-cache", + } + + var responseMap map[string]interface{} + resp, err := client.PerformQuery(option, http.MethodPost, url, "2.0", headers, false, true, payload) + if err != nil { + return err + } + err = json.Unmarshal(resp, &responseMap) + if err != nil { + return err + } + a.ManagementToken = responseMap["access_token"].(string) + return nil +} + +func (a *AzureAuth) getWorkspaceId(option client.DBClientOption) error { + log.Println("Getting Workspace ID via management token.") + url := "https://management.azure.com/subscriptions/" + a.TokenPayload.SubscriptionId + "/resourceGroups/" + a.TokenPayload.ResourceGroup + "/providers/Microsoft.Databricks/workspaces/" + a.TokenPayload.WorkspaceName + "" + + "?api-version=2018-04-01" + + payload := &WorkspaceRequest{ + Properties: &WsProps{ManagedResourceGroupID: "/subscriptions/" + a.TokenPayload.SubscriptionId + "/resourceGroups/" + a.TokenPayload.ManagedResourceGroup}, + Name: a.TokenPayload.WorkspaceName, + Location: a.TokenPayload.AzureRegion, + } + headers := map[string]string{ + "Content-Type": "application/json", + "cache-control": "no-cache", + "Authorization": "Bearer " + a.ManagementToken, + } + + var responseMap map[string]interface{} + resp, err := client.PerformQuery(option, http.MethodPut, url, "2.0", headers, true, true, payload) + if err != nil { + return err + } + err = json.Unmarshal(resp, &responseMap) + if err != nil { + return err + } + log.Println(responseMap) + a.AdbWorkspaceResourceID = responseMap["id"].(string) + return err +} + +func (a *AzureAuth) getADBPlatformToken(option client.DBClientOption) error { + log.Println("Creating Azure Databricks platform OAuth token.") + url := "https://login.microsoftonline.com/" + a.TokenPayload.TenantID + "/oauth2/token" + payload := "grant_type=client_credentials&client_id=" + a.TokenPayload.ClientID + "&client_secret=" + a.TokenPayload.ClientSecret + "&resource=" + ADBResourceID + headers := map[string]string{ + "Content-Type": 
"application/x-www-form-urlencoded", + "cache-control": "no-cache", + } + + var responseMap map[string]interface{} + resp, err := client.PerformQuery(option, http.MethodPost, url, "2.0", headers, false, true, payload) + if err != nil { + return err + } + err = json.Unmarshal(resp, &responseMap) + if err != nil { + return err + } + a.AdbPlatformToken = responseMap["access_token"].(string) + return nil +} + +func (a *AzureAuth) getWorkspaceAccessToken(option client.DBClientOption) error { + log.Println("Creating workspace token") + apiLifeTimeInSeconds := int32(600) + comment := "Secret made via SP" + url := "https://" + a.TokenPayload.AzureRegion + ".azuredatabricks.net/api/2.0/token/create" + payload := struct { + LifetimeSeconds int32 `json:"lifetime_seconds,omitempty"` + Comment string `json:"comment,omitempty"` + }{ + apiLifeTimeInSeconds, + comment, + } + headers := map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + "X-Databricks-Azure-Workspace-Resource-Id": a.AdbWorkspaceResourceID, + "X-Databricks-Azure-SP-Management-Token": a.ManagementToken, + "cache-control": "no-cache", + "Authorization": "Bearer " + a.AdbPlatformToken, + } + + var responseMap map[string]interface{} + resp, err := client.PerformQuery(option, http.MethodPost, url, "2.0", headers, true, true, payload) + if err != nil { + return err + } + err = json.Unmarshal(resp, &responseMap) + if err != nil { + return err + } + a.AdbAccessToken = responseMap["token_value"].(string) + return nil +} + +func (a *AzureAuth) initWorkspaceAndGetClient(option client.DBClientOption) (service.DBApiClient, error) { + var dbClient service.DBApiClient + err := a.getManagementToken(option) + if err != nil { + return dbClient, err + } + + err = a.getWorkspaceId(option) + if err != nil { + return dbClient, err + } + err = a.getADBPlatformToken(option) + if err != nil { + return dbClient, err + } + err = a.getWorkspaceAccessToken(option) + if err != nil { + return dbClient, err + } + + var newOption client.DBClientOption + newOption.Token = a.AdbAccessToken + newOption.Host = "https://" + a.TokenPayload.AzureRegion + ".azuredatabricks.net" + dbClient.Init(newOption) + + _, err = dbClient.Clusters().ListNodeTypes() + return dbClient, err +} diff --git a/db/azure_ws_init_test.go b/db/azure_ws_init_test.go new file mode 100644 index 000000000..d727ea6e7 --- /dev/null +++ b/db/azure_ws_init_test.go @@ -0,0 +1,64 @@ +package db + +import ( + "github.com/stikkireddy/databricks-tf-provider/client" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stretchr/testify/assert" + "log" + "os" + + //"os" + "testing" +) + +func GetIntegrationDBClientOptions() *client.DBClientOption { + var o client.DBClientOption + + return &o +} + +func TestAzureAuthCreateApiToken(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test in short mode.") + } + + azureAuth := AzureAuth{ + TokenPayload: &TokenPayload{ + ManagedResourceGroup: os.Getenv("TEST_MANAGED_RESOURCE_GROUP"), + AzureRegion: "centralus", + WorkspaceName: os.Getenv("TEST_WORKSPACE_NAME"), + ResourceGroup: os.Getenv("TEST_RESOURCE_GROUP"), + }, + ManagementToken: "", + AdbWorkspaceResourceID: "", + AdbAccessToken: "", + AdbPlatformToken: "", + } + azureAuth.TokenPayload.SubscriptionId = os.Getenv("ARM_SUBSCRIPTION_ID") + azureAuth.TokenPayload.TenantID = os.Getenv("ARM_TENANT_ID") + azureAuth.TokenPayload.ClientID = os.Getenv("ARM_CLIENT_ID") + azureAuth.TokenPayload.ClientSecret = os.Getenv("ARM_CLIENT_SECRET") + option := 
GetIntegrationDBClientOptions() + //Hack + api, err := azureAuth.initWorkspaceAndGetClient(*option) + //err := azureAuth.getManagementToken(*option) + assert.NoError(t, err, err) + + instancePoolInfo, instancePoolErr := api.InstancePools().Create(model.InstancePool{ + InstancePoolName: "my_instance_pool", + MinIdleInstances: 0, + MaxCapacity: 10, + NodeTypeId: "Standard_DS3_v2", + IdleInstanceAutoTerminationMinutes: 20, + PreloadedSparkVersions: []string{ + "6.3.x-scala2.11", + }, + }) + log.Println(instancePoolInfo) + defer func() { + err := api.InstancePools().Delete(instancePoolInfo.InstancePoolId) + assert.NoError(t, err, err) + }() + + assert.NoError(t, instancePoolErr, instancePoolErr) +} diff --git a/db/provider.go b/db/provider.go new file mode 100644 index 000000000..8513ed5a1 --- /dev/null +++ b/db/provider.go @@ -0,0 +1,153 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stikkireddy/databricks-tf-provider/client" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "log" + "os" +) + +func Provider() terraform.ResourceProvider { + provider := &schema.Provider{ + ResourcesMap: map[string]*schema.Resource{ + "db_token": resourceToken(), + "db_secret_scope": resourceSecretScope(), + "db_secret": resourceSecret(), + "db_secret_acl": resourceSecretAcl(), + "db_instance_pool": resourceInstancePool(), + "db_scim_user": resourceScimUser(), + "db_scim_group": resourceScimGroup(), + }, + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "azure_auth": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "managed_resource_group": { + Type: schema.TypeString, + Required: true, + }, + "azure_region": { + Type: schema.TypeString, + Required: true, + }, + "workspace_name": { + Type: schema.TypeString, + Required: true, + }, + "resource_group": { + Type: schema.TypeString, + Required: true, + }, + "subscription_id": { + Type: schema.TypeString, + Required: true, + }, + "client_secret": { + Type: schema.TypeString, + Required: true, + }, + "client_id": { + Type: schema.TypeString, + Required: true, + }, + "tenant_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } + + provider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { + terraformVersion := provider.TerraformVersion + if terraformVersion == "" { + // Terraform 0.12 introduced this field to the protocol + // We can therefore assume that if it's missing it's 0.10 or 0.11 + terraformVersion = "0.11+compatible" + } + return providerConfigure(d, terraformVersion) + } + + return provider +} + +func providerConfigure(d *schema.ResourceData, s string) (interface{}, error) { + var option client.DBClientOption + if azureAuth, ok := d.GetOk("azure_auth"); !ok { + if host, ok := d.GetOk("host"); ok { + option.Host = host.(string) + } else { + option.Host = os.Getenv("HOST") + } + if token, ok := d.GetOk("token"); ok { + option.Token = token.(string) + } else { + option.Token = os.Getenv("TOKEN") + } + } else { + log.Println("Creating db client via azure auth!") + azureAuthMap := azureAuth.(map[string]interface{}) + //azureAuth AzureAuth{} + tokenPayload := TokenPayload{} + if managedResourceGroup, ok := azureAuthMap["managed_resource_group"].(string); ok { + 
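+		// azureAuthMap is the raw azure_auth block from the provider
+		// configuration. A sketch of its expected shape (placeholder values,
+		// keys taken from the schema above; the ARM_* environment variables
+		// are the fallbacks used below):
+		//
+		//	map[string]interface{}{
+		//		"managed_resource_group": "my-managed-rg",
+		//		"azure_region":           "centralus",
+		//		"workspace_name":         "my-workspace",
+		//		"resource_group":         "my-rg",
+		//		"subscription_id":        "...", // or ARM_SUBSCRIPTION_ID
+		//		"client_secret":          "...", // or ARM_CLIENT_SECRET
+		//		"client_id":              "...", // or ARM_CLIENT_ID
+		//		"tenant_id":              "...", // or ARM_TENANT_ID
+		//	}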
+			tokenPayload.ManagedResourceGroup = managedResourceGroup
+		}
+		if azureRegion, ok := azureAuthMap["azure_region"].(string); ok {
+			tokenPayload.AzureRegion = azureRegion
+		}
+		if resourceGroup, ok := azureAuthMap["resource_group"].(string); ok {
+			tokenPayload.ResourceGroup = resourceGroup
+		}
+		if workspaceName, ok := azureAuthMap["workspace_name"].(string); ok {
+			tokenPayload.WorkspaceName = workspaceName
+		}
+		if subscriptionID, ok := azureAuthMap["subscription_id"].(string); ok {
+			tokenPayload.SubscriptionId = subscriptionID
+		} else {
+			tokenPayload.SubscriptionId = os.Getenv("ARM_SUBSCRIPTION_ID")
+		}
+		if clientSecret, ok := azureAuthMap["client_secret"].(string); ok {
+			tokenPayload.ClientSecret = clientSecret
+		} else {
+			tokenPayload.ClientSecret = os.Getenv("ARM_CLIENT_SECRET")
+		}
+		if clientID, ok := azureAuthMap["client_id"].(string); ok {
+			tokenPayload.ClientID = clientID
+		} else {
+			tokenPayload.ClientID = os.Getenv("ARM_CLIENT_ID")
+		}
+		if tenantID, ok := azureAuthMap["tenant_id"].(string); ok {
+			tokenPayload.TenantID = tenantID
+		} else {
+			tokenPayload.TenantID = os.Getenv("ARM_TENANT_ID")
+		}
+
+		azureAuthSetup := AzureAuth{
+			TokenPayload:           &tokenPayload,
+			ManagementToken:        "",
+			AdbWorkspaceResourceID: "",
+			AdbAccessToken:         "",
+			AdbPlatformToken:       "",
+		}
+		log.Println("Running Azure Auth")
+		return azureAuthSetup.initWorkspaceAndGetClient(option)
+	}
+
+	var dbClient service.DBApiClient
+	dbClient.Init(option)
+	return dbClient, nil
+}
diff --git a/db/provider_test.go b/db/provider_test.go
new file mode 100644
index 000000000..9095af0e4
--- /dev/null
+++ b/db/provider_test.go
@@ -0,0 +1,29 @@
+package db
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/terraform"
+	"github.com/joho/godotenv"
+	"log"
+	"os"
+	"testing"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+	testAccProvider = Provider().(*schema.Provider)
+	testAccProviders = map[string]terraform.ResourceProvider{
+		"db": testAccProvider,
+	}
+}
+
+func TestMain(m *testing.M) {
+	err := godotenv.Load("../.env")
+	if err != nil {
+		log.Println("Failed to load environment")
+	}
+	code := m.Run()
+	os.Exit(code)
+}
diff --git a/db/resource_db_instance_pool.go b/db/resource_db_instance_pool.go
new file mode 100644
index 000000000..219e24fce
--- /dev/null
+++ b/db/resource_db_instance_pool.go
@@ -0,0 +1,333 @@
+package db
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/stikkireddy/databricks-tf-provider/client/model"
+	"github.com/stikkireddy/databricks-tf-provider/client/service"
+	"strconv"
+)
+
+func resourceInstancePool() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceInstancePoolCreate,
+		Read:   resourceInstancePoolRead,
+		Update: resourceInstancePoolUpdate,
+		Delete: resourceInstancePoolDelete,
+
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"instance_pool_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"min_idle_instances": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+			"max_capacity": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+			"idle_instance_autotermination_minutes": &schema.Schema{
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+			"aws_attributes": &schema.Schema{
+				Type:     schema.TypeMap,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+				Elem:
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "availability": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "zone_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "spot_bid_price_percent": { + Type: schema.TypeString, + Default: "100", + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + "node_type_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "default_tags": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + }, + "custom_tags": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + "enable_elastic_disk": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, + "disk_spec": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ebs_volume_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + "azure_disk_volume_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + "disk_count": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + "disk_size": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + }, + }, + }, + "preloaded_spark_versions": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + //TODO: Determine what this does from a state management perspective + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + } +} + +func convertMapStringInterfaceToStringString(m map[string]interface{}) map[string]string { + response := make(map[string]string) + for k, v := range m { + if v != nil { + response[k] = v.(string) + } + } + return response +} + +func resourceInstancePoolCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + + var instancePool model.InstancePool + var instancePoolAwsAttributes model.InstancePoolAwsAttributes + var instancePoolDiskSpec model.InstancePoolDiskSpec + var instancePoolDiskSpecDiskType model.InstancePoolDiskType + instancePool.InstancePoolName = d.Get("instance_pool_name").(string) + instancePool.MinIdleInstances = int32(d.Get("min_idle_instances").(int)) + instancePool.MaxCapacity = int32(d.Get("max_capacity").(int)) + instancePool.IdleInstanceAutoTerminationMinutes = int32(d.Get("idle_instance_autotermination_minutes").(int)) + + if awsAttributesSchema, ok := d.GetOk("aws_attributes"); ok { + awsAttributesMap := awsAttributesSchema.(map[string]interface{}) + if availability, ok := awsAttributesMap["availability"]; ok { + instancePoolAwsAttributes.Availability = model.AwsAvailability(availability.(string)) + } + if zoneId, ok := awsAttributesMap["zone_id"]; ok { + instancePoolAwsAttributes.ZoneId = zoneId.(string) + } + if spotBidPricePercent, ok := awsAttributesMap["spot_bid_price_percent"]; ok { + instancePoolAwsAttributes.SpotBidPricePercent = int32(spotBidPricePercent.(int)) + } else { + instancePoolAwsAttributes.SpotBidPricePercent = int32(100) + } + instancePool.AwsAttributes = &instancePoolAwsAttributes + } + + if nodeTypeId, ok := d.GetOk("node_type_id"); ok { + instancePool.NodeTypeId = nodeTypeId.(string) + } + + if customTags, ok := d.GetOk("custom_tags"); ok { + tags := customTags.(map[string]interface{}) + 
+		instancePool.CustomTags = convertMapStringInterfaceToStringString(tags)
+	}
+
+	if enableElasticDisk, ok := d.GetOk("enable_elastic_disk"); ok {
+		instancePool.EnableElasticDisk = enableElasticDisk.(bool)
+	}
+
+	if diskSpec, ok := d.GetOk("disk_spec"); ok {
+		diskSpecMap := diskSpec.(map[string]interface{})
+		if ebsVolumeType, ok := diskSpecMap["ebs_volume_type"]; ok {
+			instancePoolDiskSpecDiskType.EbsVolumeType = ebsVolumeType.(string)
+		}
+		if azureDiskVolumeType, ok := diskSpecMap["azure_disk_volume_type"]; ok {
+			instancePoolDiskSpecDiskType.AzureDiskVolumeType = azureDiskVolumeType.(string)
+		}
+		instancePoolDiskSpec.DiskType = &instancePoolDiskSpecDiskType
+
+		if diskCount, ok := diskSpecMap["disk_count"]; ok {
+			intVal, err := strconv.Atoi(diskCount.(string))
+			if err != nil {
+				return err
+			}
+			instancePoolDiskSpec.DiskCount = int32(intVal)
+		}
+		if diskSize, ok := diskSpecMap["disk_size"]; ok {
+			intVal, err := strconv.Atoi(diskSize.(string))
+			if err != nil {
+				return err
+			}
+			instancePoolDiskSpec.DiskSize = int32(intVal)
+		}
+		instancePool.DiskSpec = &instancePoolDiskSpec
+	}
+
+	if sparkVersions, ok := d.GetOk("preloaded_spark_versions"); ok {
+		// The schema declares preloaded_spark_versions as a TypeList, which
+		// d.GetOk returns as []interface{}; convert element by element rather
+		// than asserting directly to []string (which would panic).
+		instancePool.PreloadedSparkVersions = convertInterfaceSliceToStringSlice(sparkVersions.([]interface{}))
+	}
+
+	instancePoolInfo, err := client.InstancePools().Create(instancePool)
+	if err != nil {
+		return err
+	}
+	d.SetId(instancePoolInfo.InstancePoolId)
+	return resourceInstancePoolRead(d, m)
+}
+
+func resourceInstancePoolRead(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(service.DBApiClient)
+	instancePoolInfo, err := client.InstancePools().Read(id)
+	if err != nil {
+		return err
+	}
+
+	err = d.Set("instance_pool_name", instancePoolInfo.InstancePoolName)
+	if err != nil {
+		return err
+	}
+	err = d.Set("min_idle_instances", int(instancePoolInfo.MinIdleInstances))
+	if err != nil {
+		return err
+	}
+	err = d.Set("max_capacity", int(instancePoolInfo.MaxCapacity))
+	if err != nil {
+		return err
+	}
+	err = d.Set("idle_instance_autotermination_minutes", int(instancePoolInfo.IdleInstanceAutoTerminationMinutes))
+	if err != nil {
+		return err
+	}
+	if instancePoolInfo.AwsAttributes != nil {
+		awsAttributes := map[string]interface{}{
+			"availability":           instancePoolInfo.AwsAttributes.Availability,
+			"zone_id":                instancePoolInfo.AwsAttributes.ZoneId,
+			"spot_bid_price_percent": strconv.Itoa(int(instancePoolInfo.AwsAttributes.SpotBidPricePercent)),
+		}
+		err = d.Set("aws_attributes", awsAttributes)
+		if err != nil {
+			return err
+		}
+	}
+	err = d.Set("node_type_id", instancePoolInfo.NodeTypeId)
+	if err != nil {
+		return err
+	}
+
+	err = d.Set("enable_elastic_disk", instancePoolInfo.EnableElasticDisk)
+	if err != nil {
+		return err
+	}
+
+	if instancePoolInfo.DiskSpec != nil {
+		diskSpec := map[string]interface{}{}
+		// Only dereference DiskType when it is present; disk_count and
+		// disk_size live on DiskSpec itself and are handled separately.
+		if instancePoolInfo.DiskSpec.DiskType != nil {
+			if instancePoolInfo.DiskSpec.DiskType.EbsVolumeType != "" {
+				diskSpec["ebs_volume_type"] = instancePoolInfo.DiskSpec.DiskType.EbsVolumeType
+			}
+			if instancePoolInfo.DiskSpec.DiskType.AzureDiskVolumeType != "" {
+				diskSpec["azure_disk_volume_type"] = instancePoolInfo.DiskSpec.DiskType.AzureDiskVolumeType
+			}
+		}
+		if instancePoolInfo.DiskSpec.DiskCount >= 0 {
+			diskSpec["disk_count"] = strconv.FormatInt(int64(instancePoolInfo.DiskSpec.DiskCount), 10)
+		}
+		if instancePoolInfo.DiskSpec.DiskSize >= 0 {
+			diskSpec["disk_size"] = strconv.FormatInt(int64(instancePoolInfo.DiskSpec.DiskSize), 10)
+		}
+		err = d.Set("disk_spec", diskSpec)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(instancePoolInfo.CustomTags) > 0 {
err = d.Set("custom_tags", instancePoolInfo.CustomTags) + } + + if len(instancePoolInfo.DefaultTags) > 0 { + err = d.Set("default_tags", instancePoolInfo.DefaultTags) + } + + if len(instancePoolInfo.PreloadedSparkVersions) > 0 { + err = d.Set("preloaded_spark_versions", instancePoolInfo.PreloadedSparkVersions) + } + d.SetId(id) + return err +} + +func resourceInstancePoolUpdate(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(service.DBApiClient) + + var instancePoolInfo model.InstancePoolInfo + instancePoolInfo.InstancePoolName = d.Get("instance_pool_name").(string) + instancePoolInfo.MinIdleInstances = int32(d.Get("min_idle_instances").(int)) + instancePoolInfo.MaxCapacity = int32(d.Get("max_capacity").(int)) + instancePoolInfo.IdleInstanceAutoTerminationMinutes = int32(d.Get("idle_instance_autotermination_minutes").(int)) + instancePoolInfo.InstancePoolId = id + instancePoolInfo.NodeTypeId = d.Get("node_type_id").(string) + + err := client.InstancePools().Update(instancePoolInfo) + if err != nil { + return err + } + return resourceInstancePoolUpdate(d, m) +} + +func resourceInstancePoolDelete(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + id := d.Id() + err := client.InstancePools().Delete(id) + return err +} diff --git a/db/resource_db_interactive_cluster_template.go b/db/resource_db_interactive_cluster_template.go new file mode 100644 index 000000000..d62527cf9 --- /dev/null +++ b/db/resource_db_interactive_cluster_template.go @@ -0,0 +1,42 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "log" +) + +func resourceCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceServerCreate, + Read: resourceServerRead, + Update: resourceServerUpdate, + Delete: resourceServerDelete, + + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceServerCreate(d *schema.ResourceData, m interface{}) error { + address := d.Get("address").(string) + log.Printf("Hello world testing") + log.Printf(m.(string)) + d.SetId(address) + return resourceServerRead(d, m) +} + +func resourceServerRead(d *schema.ResourceData, m interface{}) error { + return nil +} + +func resourceServerUpdate(d *schema.ResourceData, m interface{}) error { + return resourceServerRead(d, m) +} + +func resourceServerDelete(d *schema.ResourceData, m interface{}) error { + return nil +} diff --git a/db/resource_db_scim_group.go b/db/resource_db_scim_group.go new file mode 100644 index 000000000..d430a0cb0 --- /dev/null +++ b/db/resource_db_scim_group.go @@ -0,0 +1,136 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "log" +) + +func resourceScimGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceScimGroupCreate, + Update: resourceScimGroupUpdate, + Read: resourceScimGroupRead, + Delete: resourceScimGroupDelete, + + Schema: map[string]*schema.Schema{ + "display_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "members": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +//func convertInterfaceSliceToStringSlice(input []interface{}) []string { +// resp := []string{} +// for _, item := range input { +// resp = append(resp, 
+//	}
+//	return resp
+//}
+
+func resourceScimGroupCreate(d *schema.ResourceData, m interface{}) error {
+	client := m.(service.DBApiClient)
+	groupName := d.Get("display_name").(string)
+	var members []string
+
+	if rMembers, ok := d.GetOk("members"); ok {
+		members = convertInterfaceSliceToStringSlice(rMembers.(*schema.Set).List())
+		log.Println(members)
+	}
+	group, err := client.Groups().Create(groupName, members)
+	if err != nil {
+		return err
+	}
+	d.SetId(group.ID)
+	return resourceScimGroupRead(d, m)
+}
+
+//
+func getListOfMemberRefs(memberList []model.GroupMember) []string {
+	resp := []string{}
+	for _, member := range memberList {
+		resp = append(resp, member.Value)
+	}
+	return resp
+}
+
+//func getListOfEntitlements(entitlementList []model.EntitlementsListItem) []string {
+//	resp := []string{}
+//	for _, entitlement := range entitlementList {
+//		resp = append(resp, string(entitlement.Value))
+//	}
+//	return resp
+//}
+
+func resourceScimGroupRead(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(service.DBApiClient)
+	group, err := client.Groups().Read(id)
+	if err != nil {
+		return err
+	}
+
+	err = d.Set("members", getListOfMemberRefs(group.Members))
+	if err != nil {
+		return err
+	}
+
+	return err
+}
+
+func diff(sliceA []string, sliceB []string) []string {
+	var output []string
+	m := make(map[string]int)
+	for _, y := range sliceB {
+		m[y]++
+	}
+	for _, x := range sliceA {
+		if m[x] > 0 {
+			m[x]--
+			continue
+		}
+		output = append(output, x)
+	}
+	return output
+}
+
+func resourceScimGroupUpdate(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(service.DBApiClient)
+	var currentMembers []string
+	group, err := client.Groups().Read(id)
+	if err != nil {
+		return err
+	}
+	remoteMembers := getListOfMemberRefs(group.Members)
+	if members, ok := d.GetOk("members"); ok {
+		currentMembers = convertInterfaceSliceToStringSlice(members.(*schema.Set).List())
+	}
+	addMembers := diff(currentMembers, remoteMembers)
+	removeMembers := diff(remoteMembers, currentMembers)
+	log.Println("add members")
+	log.Println(addMembers)
+	log.Println("remove members")
+	log.Println(removeMembers)
+	err = client.Groups().Update(group.ID, addMembers, removeMembers)
+	if err != nil {
+		return err
+	}
+	return resourceScimGroupRead(d, m)
+}
+
+func resourceScimGroupDelete(d *schema.ResourceData, m interface{}) error {
+	id := d.Id()
+	client := m.(service.DBApiClient)
+	err := client.Groups().Delete(id)
+	return err
+}
diff --git a/db/resource_db_scim_group_test.go b/db/resource_db_scim_group_test.go
new file mode 100644
index 000000000..687a70c06
--- /dev/null
+++ b/db/resource_db_scim_group_test.go
@@ -0,0 +1,13 @@
+package db
+
+import "testing"
+
+func TestDiff(t *testing.T) {
+	tf_main := []string{"a", "b", "c"}
+	remote := []string{"b", "d"}
+
+	// diff is defined in resource_db_scim_group.go; test files share the db package.
+	t.Log(diff(tf_main, remote)) // members to add: [a c]
+	t.Log(diff(remote, tf_main)) // members to remove: [d]
+
+}
diff --git a/db/resource_db_scim_user.go b/db/resource_db_scim_user.go
new file mode 100644
index 000000000..80ad046b4
--- /dev/null
+++ b/db/resource_db_scim_user.go
@@ -0,0 +1,149 @@
+package db
+
+import (
+	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
+	"github.com/stikkireddy/databricks-tf-provider/client/model"
+
"github.com/stikkireddy/databricks-tf-provider/client/service" + "log" +) + +func resourceScimUser() *schema.Resource { + return &schema.Resource{ + Create: resourceScimUserCreate, + Update: resourceScimUserUpdate, + Read: resourceScimUserRead, + Delete: resourceScimUserDelete, + + Schema: map[string]*schema.Schema{ + "user_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "roles": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "entitlements": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + //Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func convertInterfaceSliceToStringSlice(input []interface{}) []string { + resp := []string{} + for _, item := range input { + resp = append(resp, item.(string)) + } + return resp +} + +func resourceScimUserCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + userName := d.Get("user_name").(string) + var displayName string + var roles []string + var entitlements []string + if rDisplayName, ok := d.GetOk("display_name"); ok { + displayName = rDisplayName.(string) + } + if rRoles, ok := d.GetOk("roles"); ok { + roles = convertInterfaceSliceToStringSlice(rRoles.(*schema.Set).List()) + log.Println(roles) + } + if rEntitlements, ok := d.GetOk("entitlements"); ok { + entitlements = convertInterfaceSliceToStringSlice(rEntitlements.(*schema.Set).List()) + log.Println(entitlements) + } + user, err := client.Users().Create(userName, displayName, roles, entitlements) + if err != nil { + return err + } + d.SetId(user.ID) + err = d.Set("entitlements", entitlements) + if err != nil { + return err + } + return resourceScimUserRead(d, m) +} + +func getListOfRoles(roleList []model.RoleListItem) []string { + resp := []string{} + for _, role := range roleList { + resp = append(resp, role.Value) + } + return resp +} +func getListOfEntitlements(entitlementList []model.EntitlementsListItem) []string { + resp := []string{} + for _, entitlement := range entitlementList { + resp = append(resp, string(entitlement.Value)) + } + return resp +} + +func resourceScimUserRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(service.DBApiClient) + user, err := client.Users().Read(id) + if err != nil { + return err + } + + roles := getListOfRoles(user.Roles) + //entitlements := getListOfEntitlements(user.Entitlements) + + err = d.Set("display_name", user.DisplayName) + if err != nil { + return err + } + err = d.Set("roles", roles) + if err != nil { + return err + } + + return err +} + +func resourceScimUserUpdate(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(service.DBApiClient) + userName := d.Get("user_name").(string) + var displayName string + var roles []string + var entitlements []string + if rDisplayName, ok := d.GetOk("display_name"); ok { + displayName = rDisplayName.(string) + } + if rRoles, ok := d.GetOk("roles"); ok { + + roles = convertInterfaceSliceToStringSlice(rRoles.(*schema.Set).List()) + } + if rEntitlements, ok := d.GetOk("entitlements"); ok { + entitlements = convertInterfaceSliceToStringSlice(rEntitlements.(*schema.Set).List()) + } + err := client.Users().Update(id, userName, displayName, roles, entitlements) + if err != nil { + return err + } + return 
resourceScimUserRead(d, m) +} + +func resourceScimUserDelete(d *schema.ResourceData, m interface{}) error { + id := d.Id() + client := m.(service.DBApiClient) + err := client.Users().Delete(id) + return err +} diff --git a/db/resource_db_secret.go b/db/resource_db_secret.go new file mode 100644 index 000000000..2edb7e184 --- /dev/null +++ b/db/resource_db_secret.go @@ -0,0 +1,121 @@ +package db + +import ( + "bytes" + "encoding/base64" + "encoding/gob" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "strings" +) + +func resourceSecret() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretCreate, + Read: resourceSecretRead, + Delete: resourceSecretDelete, + + Schema: map[string]*schema.Schema{ + "string_value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + "scope": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +type SecretId map[string]string + +// go binary encoder +func toGOB64(m SecretId) (string, error) { + b := bytes.Buffer{} + e := gob.NewEncoder(&b) + err := e.Encode(m) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b.Bytes()), nil +} + +// go binary decoder +func fromGOB64(str string) (SecretId, error) { + m := SecretId{} + by, err := base64.StdEncoding.DecodeString(str) + if err != nil { + return m, err + } + b := bytes.Buffer{} + b.Write(by) + d := gob.NewDecoder(&b) + err = d.Decode(&m) + if err != nil { + return m, err + } + return m, nil +} + +func getSecretId(scope string, key string) (string, error) { + return scope + "|||" + key, nil +} + +func getScopeAndKeyFromSecretId(secretIdString string) (string, string, error) { + return strings.Split(secretIdString, "|||")[0], strings.Split(secretIdString, "|||")[1], nil +} + +func resourceSecretCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + scopeName := d.Get("scope").(string) + key := d.Get("key").(string) + secretValue := d.Get("string_value").(string) + err := client.Secrets().Create(secretValue, scopeName, key) + if err != nil { + return err + } + id, err := getSecretId(scopeName, key) + if err != nil { + return err + } + d.SetId(id) + return resourceSecretRead(d, m) +} + +func resourceSecretRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + scope, key, err := getScopeAndKeyFromSecretId(id) + if err != nil { + return err + } + err = d.Set("scope", scope) + if err != nil { + return err + } + err = d.Set("key", key) + if err != nil { + return err + } + d.SetId(id) + return nil +} + +func resourceSecretDelete(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + id := d.Id() + scope, key, err := getScopeAndKeyFromSecretId(id) + if err != nil { + return err + } + err = client.Secrets().Delete(scope, key) + return err +} diff --git a/db/resource_db_secret_acl.go b/db/resource_db_secret_acl.go new file mode 100644 index 000000000..3332cb179 --- /dev/null +++ b/db/resource_db_secret_acl.go @@ -0,0 +1,96 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "strings" +) + +func resourceSecretAcl() *schema.Resource { + return &schema.Resource{ + Create: 
resourceSecretAclCreate, + Read: resourceSecretAclRead, + Delete: resourceSecretAclDelete, + + Schema: map[string]*schema.Schema{ + "scope": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "principal": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "permission": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +type SecretAclId map[string]string + +func getSecretAclId(scope string, key string) (string, error) { + return scope + "|||" + key, nil +} + +func getScopeAndKeyFromSecretAclId(SecretAclIdString string) (string, string, error) { + return strings.Split(SecretAclIdString, "|||")[0], strings.Split(SecretAclIdString, "|||")[1], nil +} + +func resourceSecretAclCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + scopeName := d.Get("scope").(string) + principal := d.Get("principal").(string) + permission := model.AclPermission(d.Get("permission").(string)) + err := client.SecretAcls().Create(scopeName, principal, permission) + if err != nil { + return err + } + id, err := getSecretAclId(scopeName, principal) + if err != nil { + return err + } + d.SetId(id) + return resourceSecretAclRead(d, m) +} + +func resourceSecretAclRead(d *schema.ResourceData, m interface{}) error { + id := d.Id() + scope, principal, err := getScopeAndKeyFromSecretAclId(id) + if err != nil { + return err + } + err = d.Set("scope", scope) + if err != nil { + return err + } + err = d.Set("principal", principal) + if err != nil { + return err + } + + client := m.(service.DBApiClient) + secretAcl, err := client.SecretAcls().Read(scope, principal) + if err != nil { + return err + } + err = d.Set("permission", secretAcl.Permission) + return err +} + +func resourceSecretAclDelete(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + id := d.Id() + scope, key, err := getScopeAndKeyFromSecretAclId(id) + if err != nil { + return err + } + err = client.SecretAcls().Delete(scope, key) + return err +} diff --git a/db/resource_db_secret_scope.go b/db/resource_db_secret_scope.go new file mode 100644 index 000000000..c5582ceaf --- /dev/null +++ b/db/resource_db_secret_scope.go @@ -0,0 +1,67 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/stikkireddy/databricks-tf-provider/client/service" +) + +func resourceSecretScope() *schema.Resource { + return &schema.Resource{ + Create: resourceSecretScopeCreate, + Read: resourceSecretScopeRead, + Delete: resourceSecretScopeDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "initial_manage_principal": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + "backend_type": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceSecretScopeCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + scopeName := d.Get("name").(string) + initialManagePrincipal := d.Get("initial_manage_principal").(string) + err := client.SecretScopes().Create(scopeName, initialManagePrincipal) + if err != nil { + return err + } + d.SetId(scopeName) + return resourceSecretScopeRead(d, m) +} + +func resourceSecretScopeRead(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + id := d.Id() + scope, err := client.SecretScopes().Read(id) + 
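
The secret and secret ACL resources above both build their Terraform ID by joining the scope with the key or principal using the `|||` delimiter (`getSecretId`/`getScopeAndKeyFromSecretId` and `getSecretAclId`/`getScopeAndKeyFromSecretAclId`). The sketch below is an illustrative, slightly more defensive variant of that parsing and is not code from this change: it assumes only the `|||` convention shown above and returns an error instead of indexing the split result directly.

```go
package main

import (
	"fmt"
	"strings"
)

// parseCompositeID splits an ID of the form "<scope>|||<key>" into its two
// parts. Unlike the helpers above, it reports an error when the delimiter is
// missing instead of assuming the split produced two elements.
func parseCompositeID(id string) (string, string, error) {
	parts := strings.SplitN(id, "|||", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected an id of the form <scope>|||<key>, got %q", id)
	}
	return parts[0], parts[1], nil
}

func main() {
	// Scope and key names taken from the example configuration later in this change.
	id := "terraform-demo-scope2|||demo-test-secret-1"
	scope, key, err := parseCompositeID(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(scope, key) // terraform-demo-scope2 demo-test-secret-1
}
```
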
if err != nil { + return err + } + d.SetId(scope.Name) + err = d.Set("name", scope.Name) + if err != nil { + return err + } + err = d.Set("backend_type", scope.BackendType) + return err +} + +func resourceSecretScopeDelete(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + id := d.Id() + err := client.SecretScopes().Delete(id) + return err +} diff --git a/db/resource_db_token.go b/db/resource_db_token.go new file mode 100644 index 000000000..1bec5f26e --- /dev/null +++ b/db/resource_db_token.go @@ -0,0 +1,80 @@ +package db + +import ( + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/stikkireddy/databricks-tf-provider/client/service" +) + +func resourceToken() *schema.Resource { + return &schema.Resource{ + Create: resourceTokenCreate, + Read: resourceTokenRead, + Delete: resourceTokenDelete, + + Schema: map[string]*schema.Schema{ + "lifetime_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 0, + }, + "comment": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "", + }, + "creation_time": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "token_value": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "expiry_time": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceTokenCreate(d *schema.ResourceData, m interface{}) error { + client := m.(service.DBApiClient) + lifeTimeSeconds := d.Get("lifetime_seconds").(int) + comment := d.Get("comment").(string) + tokenResp, err := client.Tokens().Create(int32(lifeTimeSeconds), comment) + if err != nil { + return err + } + d.SetId(tokenResp.TokenInfo.TokenID) + err = d.Set("token_value", tokenResp.TokenValue) + if err != nil { + return err + } + return resourceTokenRead(d, m) +} +func resourceTokenRead(d *schema.ResourceData, m interface{}) error { + tokenId := d.Id() + client := m.(service.DBApiClient) + token, err := client.Tokens().Read(tokenId) + if err != nil { + return err + } + err = d.Set("creation_time", token.CreationTime) + if err != nil { + return err + } + err = d.Set("expiry_time", token.ExpiryTime) + return err +} +func resourceTokenDelete(d *schema.ResourceData, m interface{}) error { + tokenId := d.Id() + client := m.(service.DBApiClient) + err := client.Tokens().Delete(tokenId) + return err +} diff --git a/db/resource_db_token_test.go b/db/resource_db_token_test.go new file mode 100644 index 000000000..b88f67042 --- /dev/null +++ b/db/resource_db_token_test.go @@ -0,0 +1,106 @@ +package db + +import ( + "errors" + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stikkireddy/databricks-tf-provider/client/model" + "github.com/stikkireddy/databricks-tf-provider/client/service" + "testing" +) + +func TestAccTokenResource(t *testing.T) { + var tokenInfo model.TokenInfo + + // generate a random name for each tokenInfo test run, to avoid + // collisions from multiple concurrent tests. 
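
As a standalone illustration of the randomization described in this comment, the snippet below renders a `db_token` configuration with a per-run random comment. It mirrors what `testAccTokenResource` does further down and assumes nothing beyond the `acctest` helper already imported by this test; the `tf-acc-` prefix is only an example.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
)

func main() {
	// A unique, per-run comment keeps concurrent acceptance-test runs from
	// colliding on the same token.
	comment := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum))

	// Interpolate the random value into the test configuration.
	config := fmt.Sprintf(`
resource "db_token" "my-token" {
  lifetime_seconds = 6000
  comment          = "%s"
}
`, comment)
	fmt.Println(config)
}
```
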
+ // the acctest package includes many helpers such as RandStringFromCharSet + // See https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/acctest + rComment := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckTokenResourceDestroy, + Steps: []resource.TestStep{ + { + // use a dynamic configuration with the random name from above + Config: testAccTokenResource(rComment), + // compose a basic test, checking both remote and local values + Check: resource.ComposeTestCheckFunc( + // query the API to retrieve the tokenInfo object + testAccCheckTokenResourceExists("db_token.my-token", &tokenInfo, t), + // verify remote values + testAccCheckTokenValues(&tokenInfo, rComment), + // verify local values + resource.TestCheckResourceAttr("db_token.my-token", "lifetime_seconds", "6000"), + resource.TestCheckResourceAttr("db_token.my-token", "comment", rComment), + ), + }, + }, + }) +} + +func testAccCheckTokenResourceDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(service.DBApiClient) + for _, rs := range s.RootModule().Resources { + if rs.Type != "db_token" { + continue + } + _, err := conn.Tokens().Read(rs.Primary.ID) + if err != nil { + return nil + } + return errors.New("Resource token is not cleaned up!") + } + return nil +} + +func testAccPreCheck(t *testing.T) { + return +} + +func testAccCheckTokenValues(tokenInfo *model.TokenInfo, comment string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if tokenInfo.Comment != comment { + return errors.New("The comment for the token created does not equal the value passed in!") + } + return nil + } +} + +// testAccCheckTokenResourceExists queries the API and retrieves the matching Widget. +func testAccCheckTokenResourceExists(n string, tokenInfo *model.TokenInfo, t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + // find the corresponding state object + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // retrieve the configured client from the test setup + conn := testAccProvider.Meta().(service.DBApiClient) + resp, err := conn.Tokens().Read(rs.Primary.ID) + t.Log(resp) + if err != nil { + return err + } + + // If no error, assign the response Widget attribute to the widget pointer + *tokenInfo = resp + return nil + //return fmt.Errorf("Token (%s) not found", rs.Primary.ID) + } +} + +// testAccTokenResource returns an configuration for an Example Widget with the provided name +func testAccTokenResource(comment string) string { + return fmt.Sprintf(` + resource "db_token" "my-token" { + lifetime_seconds = 6000 + comment = "%v" + } + `, comment) +} diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..d0c3cbf10 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
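
For context on how the resource definitions above are exposed to Terraform, the sketch below shows a `helper/schema` provider wiring that registers them under the resource type names used in the tests and example configuration. It is illustrative only: the actual `db.Provider()` implementation lives elsewhere in this change, and provider-level configuration (token/host and `azure_auth`) is omitted here.

```go
package db

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// exampleProvider is a minimal sketch of how the resources defined above are
// typically registered; the real db.Provider() may register more resources
// (for example db_instance_pool) and configure the API client.
func exampleProvider() *schema.Provider {
	return &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"db_scim_user":    resourceScimUser(),
			"db_scim_group":   resourceScimGroup(),
			"db_secret_scope": resourceSecretScope(),
			"db_secret":       resourceSecret(),
			"db_secret_acl":   resourceSecretAcl(),
			"db_token":        resourceToken(),
		},
	}
}
```
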
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 000000000..9534b0181 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..b967b0b3e --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,29 @@ +alabaster==0.7.12 +Babel==2.8.0 +certifi==2019.11.28 +chardet==3.0.4 +commonmark==0.9.1 +docutils==0.16 +idna==2.8 +imagesize==1.2.0 +Jinja2==2.11.1 +m2r==0.2.1 +MarkupSafe==1.1.1 +mistune==0.8.4 +packaging==20.1 +Pygments==2.5.2 +pyparsing==2.4.6 +pytz==2019.3 +recommonmark==0.6.0 +requests==2.22.0 +six==1.14.0 +snowballstemmer==2.0.0 +Sphinx==2.3.1 +sphinx-rtd-theme==0.4.3 +sphinxcontrib-applehelp==1.0.1 +sphinxcontrib-devhelp==1.0.1 +sphinxcontrib-htmlhelp==1.0.2 +sphinxcontrib-jsmath==1.0.1 +sphinxcontrib-qthelp==1.0.2 +sphinxcontrib-serializinghtml==1.1.3 +urllib3==1.25.8 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 000000000..f99bd63ab --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,57 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'Databricks Terraform Provider' +copyright = '2020, databricks' +author = 'databricks' + +# The full version, including alpha/beta/rc tags +release = '0.1.0' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ +# "recommonmark", + "m2r" +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. 
+exclude_patterns = []
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
\ No newline at end of file
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 000000000..2fc754244
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,17 @@
+.. databricks-terraform-provider documentation master file, created by
+   sphinx-quickstart on Wed Jan 29 15:36:15 2020.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to databricks-terraform-provider's documentation!
+=========================================================
+
+Click on the installation section to get started with the provider.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   Databricks Provider Overview
+   Installation
+   Provider Details
\ No newline at end of file
diff --git a/docs/source/provider/installation.md b/docs/source/provider/installation.md
new file mode 100644
index 000000000..570f1fc44
--- /dev/null
+++ b/docs/source/provider/installation.md
@@ -0,0 +1,35 @@
+# Databricks Terraform Provider Installation
+
+A Terraform provider for Databricks workspace components.
+
+## Installing `databricks-tf-provider` with Go
+
+* Install Go 1.13; with earlier Go versions you may have to set your `$GOPATH` manually. If you have not installed Go yet, see the [installation guide](https://golang.org/doc/install).
+* Install Terraform 0.12.x [from here](https://www.terraform.io/downloads.html) and save the binary as `/usr/local/bin/terraform` (create `/usr/local/bin` if it doesn't exist). This provider DOES NOT SUPPORT Terraform 0.11 or below.
+* Download the code by issuing a `go get` command.
+
+```bash
+# Download the source code for databricks-tf-provider
+# and build the needed binary, saving it inside $GOPATH/bin
+$ go get -u github.com/stikkireddy/databricks-tf-provider
+
+# After fetching the code base, switch into its directory.
+$ cd $GOPATH/src/github.com/stikkireddy/databricks-tf-provider
+
+# Run the build using the make target provided by the Makefile.
+$ make build
+
+# Move the resulting binary to where Terraform can pick it up.
+$ mv terraform-provider-db /usr/local/bin/
+```
+
+If you wish to uninstall the binary, simply remove the file from that directory.
+
+```bash
+$ rm /usr/local/bin/terraform-provider-db
+```
+
+## Using `databricks-tf-provider` with Docker (TODO!!)
+
+
+
diff --git a/docs/source/provider/provider.md b/docs/source/provider/provider.md
new file mode 100644
index 000000000..f51d4f250
--- /dev/null
+++ b/docs/source/provider/provider.md
@@ -0,0 +1,23 @@
+# Databricks Provider
+
+> TODO
+
+## Example Usage
+
+
+
+## Authentication
+
+### Azure Auth
+
+### AWS Auth
+
+### Static credentials
+
+
+### Environment variables
+
+
+## Provider Argument Reference
+
+
diff --git a/docs/source/readme_link.rst b/docs/source/readme_link.rst
new file mode 100644
index 000000000..8962b509a
--- /dev/null
+++ b/docs/source/readme_link.rst
@@ -0,0 +1,3 @@
+
+.. 
mdinclude:: ../../README.md + diff --git a/examples/simple-azure-tf-deployment/main.tf b/examples/simple-azure-tf-deployment/main.tf new file mode 100644 index 000000000..a743f38f6 --- /dev/null +++ b/examples/simple-azure-tf-deployment/main.tf @@ -0,0 +1,115 @@ +variable "client_id" { + type = string +} +variable "client_secret" { + type = string +} +variable "tenant_id" { + type = string +} +variable "subscription_id" { + type = string +} +variable "token" { + type = string +} +variable "host" { + type = string +} +variable "resource_group" { + type = string +} +variable "managed_resource_group_name" { + type = string +} + +provider "azurerm" { + client_id = var.client_id + client_secret = var.client_secret + tenant_id = var.tenant_id + subscription_id = var.subscription_id +} + +resource "azurerm_databricks_workspace" "demo_test_workspace" { + location = "centralus" + name = "terraform-test-ws-6" + resource_group_name = var.resource_group + managed_resource_group_name = var.managed_resource_group_name + sku = "premium" +} + + + +provider "db" { + azure_auth = { + managed_resource_group = azurerm_databricks_workspace.demo_test_workspace.managed_resource_group_name + azure_region = azurerm_databricks_workspace.demo_test_workspace.location + workspace_name = azurerm_databricks_workspace.demo_test_workspace.name + resource_group = azurerm_databricks_workspace.demo_test_workspace.resource_group_name + client_id = var.client_id + client_secret = var.client_secret + tenant_id = var.tenant_id + subscription_id = var.subscription_id + } +} + +// + +resource "db_scim_user" "my-user" { + count = 2 + user_name = join("", ["demo-test-user", "+",count.index,"@databricks.com"]) + display_name = "demo Test User" + entitlements = [ + "allow-cluster-create", + ] +} + +resource "db_scim_group" "my-group" { + display_name = "demo Test Group" + members = [db_scim_user.my-user[0].id] +} +resource "db_secret_scope" "my-scope" { + name = "terraform-demo-scope2" +} + +resource "db_secret" "test_secret" { + key = "demo-test-secret-1" + string_value = "hello world 123" + scope = db_secret_scope.my-scope.name +} + +resource "db_secret_acl" "my-acl" { + principal = "USERS" + permission = "READ" + scope = db_secret_scope.my-scope.name +} + +resource "db_instance_pool" "my-pool" { + instance_pool_name = "demo-terraform-pool" + min_idle_instances = 0 + max_capacity = 5 + node_type_id = "Standard_DS3_v2" + idle_instance_autotermination_minutes = 10 + disk_spec = { + ebs_volume_type = "GENERAL_PURPOSE_SSD" + disk_size = 80 + disk_count = 1 + } + custom_tags = { + "creator": "demo user" + "testChange": "demo user" + } +} + +resource "db_token" "my-token" { + lifetime_seconds = 6000 + comment = "Testing terraform v2" +} + +output "db_user_ids" { + value = db_scim_user.my-user[*].id +} + +output "db_scope" { + value = db_secret_scope.my-scope.name +} diff --git a/examples/simple-azure-tf-deployment/terraform.tfvars.template b/examples/simple-azure-tf-deployment/terraform.tfvars.template new file mode 100644 index 000000000..ea716be31 --- /dev/null +++ b/examples/simple-azure-tf-deployment/terraform.tfvars.template @@ -0,0 +1,6 @@ +client_id="azure-enterprise-app-client-id" +client_secret="azure-enterprise-app-secret" +tenant_id="azure active directory tenant id" +subscription_id="azure subscription of workspace" +resource_group="azure resource group for workspace" +managed_resource_group_name="managed resource group for the azure databricks workspace" \ No newline at end of file diff --git a/go.mod b/go.mod new file mode 
100644 index 000000000..3edcbc861 --- /dev/null +++ b/go.mod @@ -0,0 +1,12 @@ +module github.com/stikkireddy/databricks-tf-provider + +go 1.13 + +require ( + github.com/betacraft/easytags v1.0.2 // indirect + github.com/google/go-querystring v1.0.0 + github.com/hashicorp/terraform-plugin-sdk v1.6.0 + github.com/joho/godotenv v1.3.0 + github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a + github.com/stretchr/testify v1.3.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..48afdb39d --- /dev/null +++ b/go.sum @@ -0,0 +1,275 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U= +github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.25.3 h1:uM16hIw9BotjZKMZlX05SN2EFtaWfi/NonPKIARiBLQ= +github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/betacraft/easytags v1.0.2/go.mod h1:g3FkpCCFj2YbU+JDDXfumrmJpxj0ZJ/+szwQM5JkWZ8= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp 
v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw= +github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl/v2 v2.0.0 h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8= +github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8 h1:+RyjwU+Gnd/aTJBPZVDNm903eXVjjqhbaR4Ypx3xYyY= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= +github.com/hashicorp/terraform-plugin-sdk v1.6.0 h1:Um5hsAL7kKsfTHtan8lybY/d03F2bHu4fjRB1H6Ag4U= +github.com/hashicorp/terraform-plugin-sdk v1.6.0/go.mod h1:H5QLx/uhwfxBZ59Bc5SqT19M4i+fYt7LZjHTpbLZiAg= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596 h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= +github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= +github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6GvtgrMub2ci4ZLQMvTaYIba2lfdc= +github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8= +github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
+google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/main.go b/main.go new file mode 100644 index 000000000..106d25923 --- /dev/null +++ b/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/hashicorp/terraform-plugin-sdk/plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/stikkireddy/databricks-tf-provider/db" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: func() terraform.ResourceProvider { + return db.Provider() + }, + }) +} diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
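For orientation alongside the `main.go` added above: a `ProviderFunc` for the terraform-plugin-sdk returns a value implementing `terraform.ResourceProvider`, which in practice is almost always a `*schema.Provider`. The sketch below is only an illustration of that pattern — the package name, schema keys, environment-variable defaults, and empty resource map are assumptions for the example, not the `db.Provider()` implementation contained in this change.

```go
package db

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// Provider sketches the kind of value a ProviderFunc returns.
// Field names and env vars here are illustrative placeholders only.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// Hypothetical provider-level settings.
			"host": {
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("HOST", nil),
			},
			"token": {
				Type:        schema.TypeString,
				Required:    true,
				Sensitive:   true,
				DefaultFunc: schema.EnvDefaultFunc("TOKEN", nil),
			},
		},
		// Resources would be registered here by name.
		ResourcesMap: map[string]*schema.Resource{},
	}
}
```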
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000..4ff4e2f1c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,526 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. 
+ if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Email calls Client.Email on the default client. +func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } + +// Hostname returns the instance's hostname. 
This will be of the form +// "<instanceID>.c.<projID>.internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// Email returns the email address associated with the service account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Email(serviceAccount string) (string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// "<instanceID>.c.<projID>.internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". 
+func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects/<projNum>/zones/<zoneName>". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. 
+ Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 000000000..5232cb673 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,315 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "context" + "fmt" + "time" + + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
+type grpcClient struct { + c pb.IAMPolicyClient +} + +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + var proto *pb.Policy + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + return err + }, withRetry) + if err != nil { + return nil, err + } + return proto, nil +} + +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + var res *pb.TestIamPermissionsResponse + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) +} + +// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// grpc service that implements IAM as a mixin +func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: c}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. 
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the suppied role, or nil if there isn't one. 
+func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} + +// insertMetadata inserts metadata into the given context +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 000000000..6435695ba --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. +func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go new file mode 100644 index 000000000..72780f764 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional.go @@ -0,0 +1,108 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package optional provides versions of primitive types that can +// be nil. These are useful in methods that update some of an API object's +// fields. +package optional + +import ( + "fmt" + "strings" + "time" +) + +type ( + // Bool is either a bool or nil. + Bool interface{} + + // String is either a string or nil. + String interface{} + + // Int is either an int or nil. + Int interface{} + + // Uint is either a uint or nil. + Uint interface{} + + // Float64 is either a float64 or nil. + Float64 interface{} + + // Duration is either a time.Duration or nil. + Duration interface{} +) + +// ToBool returns its argument as a bool. +// It panics if its argument is nil or not a bool. +func ToBool(v Bool) bool { + x, ok := v.(bool) + if !ok { + doPanic("Bool", v) + } + return x +} + +// ToString returns its argument as a string. +// It panics if its argument is nil or not a string. +func ToString(v String) string { + x, ok := v.(string) + if !ok { + doPanic("String", v) + } + return x +} + +// ToInt returns its argument as an int. +// It panics if its argument is nil or not an int. +func ToInt(v Int) int { + x, ok := v.(int) + if !ok { + doPanic("Int", v) + } + return x +} + +// ToUint returns its argument as a uint. +// It panics if its argument is nil or not a uint. +func ToUint(v Uint) uint { + x, ok := v.(uint) + if !ok { + doPanic("Uint", v) + } + return x +} + +// ToFloat64 returns its argument as a float64. +// It panics if its argument is nil or not a float64. +func ToFloat64(v Float64) float64 { + x, ok := v.(float64) + if !ok { + doPanic("Float64", v) + } + return x +} + +// ToDuration returns its argument as a time.Duration. +// It panics if its argument is nil or not a time.Duration. +func ToDuration(v Duration) time.Duration { + x, ok := v.(time.Duration) + if !ok { + doPanic("Duration", v) + } + return x +} + +func doPanic(capType string, v interface{}) { + panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) +} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 000000000..7a7b4c205 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,54 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. 
It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return Annotatef(lastErr, "retry failed with %v; last error", cerr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go new file mode 100644 index 000000000..66dc39116 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -0,0 +1,109 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + "fmt" + + "go.opencensus.io/trace" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/status" +) + +// StartSpan adds a span to the trace with the given name. +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +// EndSpan ends a span with the given error. +func EndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(toStatus(err)) + } + span.End() +} + +// toStatus interrogates an error and converts it to an appropriate +// OpenCensus status. +func toStatus(err error) trace.Status { + if err2, ok := err.(*googleapi.Error); ok { + return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} + } else if s, ok := status.FromError(err); ok { + return trace.Status{Code: int32(s.Code()), Message: s.Message()} + } else { + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} + } +} + +// TODO(deklerk): switch to using OpenCensus function when it becomes available. 
+// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto +func httpStatusCodeToOCCode(httpStatusCode int) int32 { + switch httpStatusCode { + case 200: + return int32(code.Code_OK) + case 499: + return int32(code.Code_CANCELLED) + case 500: + return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS + case 400: + return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE + case 504: + return int32(code.Code_DEADLINE_EXCEEDED) + case 404: + return int32(code.Code_NOT_FOUND) + case 409: + return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED + case 403: + return int32(code.Code_PERMISSION_DENIED) + case 401: + return int32(code.Code_UNAUTHENTICATED) + case 429: + return int32(code.Code_RESOURCE_EXHAUSTED) + case 501: + return int32(code.Code_UNIMPLEMENTED) + case 503: + return int32(code.Code_UNAVAILABLE) + default: + return int32(code.Code_UNKNOWN) + } +} + +// TODO: (odeke-em): perhaps just pass around spans due to the cost +// incurred from using trace.FromContext(ctx) yet we could avoid +// throwing away the work done by ctx, span := trace.StartSpan. +func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + trace.FromContext(ctx).Annotatef(attrs, format, args...) +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100644 index 000000000..d7c5a3e21 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 000000000..d291921b1 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20190802" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md new file mode 100644 index 000000000..a2253c4bb --- /dev/null +++ b/vendor/cloud.google.com/go/storage/README.md @@ -0,0 +1,32 @@ +## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) + +- [About Cloud Storage](https://cloud.google.com/storage/) +- [API documentation](https://cloud.google.com/storage/docs) +- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) + +### Example Usage + +First create a `storage.Client` to use throughout your application: + +[snip]:# (storage-1) +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +[snip]:# (storage-2) +```go +// Read the object1 from bucket. +rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := ioutil.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 000000000..7855d110a --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,335 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "net/http" + "reflect" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. 
+type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>", +// "domain-<domain>" and "project-team-<projectId>". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a +// Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + EntityID string + Role ACLRole + Domain string + Email string + ProjectTeam *ProjectTeam +} + +// ProjectTeam is the project team associated with the entity, if any. +type ProjectTeam struct { + ProjectNumber string + Team string +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool + userProject string // for requester-pays buckets +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the role for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectSet(ctx, entity, role, false) + } + if a.isDefault { + return a.objectSet(ctx, entity, role, true) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries. 
+func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(ctx, req) + _, err := req.Do() + return err + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(ctx, req) + return runWithRetry(ctx, func() error { + _, err := req.Do() + return err + }) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + 
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if a.userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) + } + setClientHeader(call.Header()) +} + +func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRule(item)) + } + return rs +} + +func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRule(item)) + } + return rs +} + +func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toObjectProjectTeam(a.ProjectTeam), + } +} + +func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toBucketProjectTeam(a.ProjectTeam), + } +} + +func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary + } + return r +} + +func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary + } + return r +} + +func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { + return &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { + return &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} + +func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 000000000..07c470d3e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,1195 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. +type BucketHandle struct { + c *Client + name string + acl ACLHandle + defaultObjectACL ACLHandle + conds *BucketConditions + userProject string // project for Requester Pays buckets +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + }, + } +} + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. + if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := b.c.raw.Buckets.Insert(projectID, bkt) + setClientHeader(req.Header()) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newDeleteCall() + if err != nil { + return err + } + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { + req := b.c.raw.Buckets.Delete(b.name) + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. 
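+//
+// For example (a sketch; ctx and bkt are placeholders), granting read access on
+// future objects to all users by default:
+//
+//	if err := bkt.DefaultObjectACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+//		// TODO: Handle error.
+//	}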
+func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
+	return &b.defaultObjectACL
+}
+
+// Object returns an ObjectHandle, which provides operations on the named object.
+// This call does not perform any network operations.
+//
+// name must consist entirely of valid UTF-8-encoded runes. The full specification
+// for valid object names can be found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (b *BucketHandle) Object(name string) *ObjectHandle {
+	return &ObjectHandle{
+		c:      b.c,
+		bucket: b.name,
+		object: name,
+		acl: ACLHandle{
+			c:           b.c,
+			bucket:      b.name,
+			object:      name,
+			userProject: b.userProject,
+		},
+		gen:         -1,
+		userProject: b.userProject,
+	}
+}
+
+// Attrs returns the metadata for the bucket.
+func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	req, err := b.newGetCall()
+	if err != nil {
+		return nil, err
+	}
+	var resp *raw.Bucket
+	err = runWithRetry(ctx, func() error {
+		resp, err = req.Context(ctx).Do()
+		return err
+	})
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return nil, ErrBucketNotExist
+	}
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(resp)
+}
+
+func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
+	req := b.c.raw.Buckets.Get(b.name).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// Update updates a bucket's attributes.
+func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
+	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
+	defer func() { trace.EndSpan(ctx, err) }()
+
+	req, err := b.newPatchCall(&uattrs)
+	if err != nil {
+		return nil, err
+	}
+	if uattrs.PredefinedACL != "" {
+		req.PredefinedAcl(uattrs.PredefinedACL)
+	}
+	if uattrs.PredefinedDefaultObjectACL != "" {
+		req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
+	}
+	// TODO(jba): retry iff metagen is set?
+	rb, err := req.Context(ctx).Do()
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(rb)
+}
+
+func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
+	rb := uattrs.toRawBucket()
+	req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
+// Read-only fields are ignored by BucketHandle.Create.
+type BucketAttrs struct {
+	// Name is the name of the bucket.
+	// This field is read-only.
+	Name string
+
+	// ACL is the list of access control rules on the bucket.
+	ACL []ACLRule
+
+	// BucketPolicyOnly configures access checks to use only bucket-level IAM
+	// policies.
+	BucketPolicyOnly BucketPolicyOnly
+
+	// DefaultObjectACL is the list of access controls to
+	// apply to new objects when no object ACL is provided.
+	DefaultObjectACL []ACLRule
+
+	// DefaultEventBasedHold is the default value for event-based hold on
+	// newly created objects in this bucket. It defaults to false.
+ DefaultEventBasedHold bool + + // If not empty, applies a predefined set of access controls. It should be set + // only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // It should be set only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedDefaultObjectACL string + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + // This field is read-only. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "MULTI_REGIONAL", + // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which + // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on + // the bucket's location settings. + // + // "DURABLE_REDUCED_AVAILABILITY", "MULTI_REGIONAL" and "REGIONAL" + // are considered legacy storage classes. + StorageClass string + + // Created is the creation time of the bucket. + // This field is read-only. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + VersioningEnabled bool + + // Labels are the bucket's labels. + Labels map[string]string + + // RequesterPays reports whether the bucket is a Requester Pays bucket. + // Clients performing operations on Requester Pays buckets must provide + // a user project (see BucketHandle.UserProject), which will be billed + // for the operations. + RequesterPays bool + + // Lifecycle is the lifecycle configuration for objects in the bucket. + Lifecycle Lifecycle + + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket. A RetentionPolicy of nil implies the bucket + // has no minimum data retention. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // The bucket's Cross-Origin Resource Sharing (CORS) configuration. + CORS []CORS + + // The encryption configuration used by default for newly inserted objects. + Encryption *BucketEncryption + + // The logging configuration. + Logging *BucketLogging + + // The website configuration. + Website *BucketWebsite + + // Etag is the HTTP/1.1 Entity tag for the bucket. + // This field is read-only. + Etag string + + // LocationType describes how data is stored and replicated. + // Typical values are "multi-region", "region" and "dual-region". + // This field is read-only. + LocationType string +} + +// BucketPolicyOnly configures access checks to use only bucket-level IAM +// policies. +type BucketPolicyOnly struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. 
+	LockedTime time.Time
+}
+
+// Lifecycle is the lifecycle configuration for objects in the bucket.
+type Lifecycle struct {
+	Rules []LifecycleRule
+}
+
+// RetentionPolicy enforces a minimum retention time for all objects
+// contained in the bucket.
+//
+// Any attempt to overwrite or delete objects younger than the retention
+// period will result in an error. An unlocked retention policy can be
+// modified or removed from the bucket via the Update method. A
+// locked retention policy cannot be removed or shortened in duration
+// for the lifetime of the bucket.
+//
+// This feature is in private alpha release. It is not currently available to
+// most customers. It might be changed in backwards-incompatible ways and is not
+// subject to any SLA or deprecation policy.
+type RetentionPolicy struct {
+	// RetentionPeriod specifies the duration that objects need to be
+	// retained. Retention duration must be greater than zero and less than
+	// 100 years. Note that enforcement of retention periods less than a day
+	// is not guaranteed. Such periods should only be used for testing
+	// purposes.
+	RetentionPeriod time.Duration
+
+	// EffectiveTime is the time from which the policy was enforced and
+	// effective. This field is read-only.
+	EffectiveTime time.Time
+
+	// IsLocked describes whether the bucket is locked. Once locked, an
+	// object retention policy cannot be modified.
+	// This field is read-only.
+	IsLocked bool
+}
+
+const (
+	// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
+	rfc3339Date = "2006-01-02"
+
+	// DeleteAction is a lifecycle action that deletes live and/or archived
+	// objects. Takes precedence over SetStorageClass actions.
+	DeleteAction = "Delete"
+
+	// SetStorageClassAction changes the storage class of live and/or archived
+	// objects.
+	SetStorageClassAction = "SetStorageClass"
+)
+
+// LifecycleRule is a lifecycle configuration rule.
+//
+// When all the configured conditions are met by an object in the bucket, the
+// configured action will automatically be taken on that object.
+type LifecycleRule struct {
+	// Action is the action to take when all of the associated conditions are
+	// met.
+	Action LifecycleAction
+
+	// Condition is the set of conditions that must be met for the associated
+	// action to be taken.
+	Condition LifecycleCondition
+}
+
+// LifecycleAction is a lifecycle configuration action.
+type LifecycleAction struct {
+	// Type is the type of action to take on matching objects.
+	//
+	// Acceptable values are "Delete" to delete matching objects and
+	// "SetStorageClass" to set the storage class defined in StorageClass on
+	// matching objects.
+	Type string
+
+	// StorageClass is the storage class to set on matching objects if the Action
+	// is "SetStorageClass".
+	StorageClass string
+}
+
+// Liveness specifies whether the object is live or not.
+type Liveness int
+
+const (
+	// LiveAndArchived includes both live and archived objects.
+	LiveAndArchived Liveness = iota
+	// Live specifies that the object is still live.
+	Live
+	// Archived specifies that the object is archived.
+	Archived
+)
+
+// LifecycleCondition is a set of conditions used to match objects and take an
+// action automatically.
+//
+// All configured conditions must be met for the associated action to be taken.
+type LifecycleCondition struct {
+	// AgeInDays is the age of the object in days.
+	AgeInDays int64
+
+	// CreatedBefore is the time the object was created.
+ // + // This condition is satisfied when an object is created before midnight of + // the specified date in UTC. + CreatedBefore time.Time + + // Liveness specifies the object's liveness. Relevant only for versioned objects + Liveness Liveness + + // MatchesStorageClasses is the condition matching the object's storage + // class. + // + // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", + // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + MatchesStorageClasses []string + + // NumNewerVersions is the condition matching objects with a number of newer versions. + // + // If the value is N, this condition is satisfied when there are at least N + // versions (including the live version) newer than this version of the + // object. + NumNewerVersions int64 +} + +// BucketLogging holds the bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // The destination bucket where the current bucket's logs + // should be placed. + LogBucket string + + // A prefix for log object names. + LogObjectPrefix string +} + +// BucketWebsite holds the bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See +// https://cloud.google.com/storage/docs/static-website for more information. +type BucketWebsite struct { + // If the requested object path is missing, the service will ensure the path has + // a trailing '/', append this suffix, and attempt to retrieve the resulting + // object. This allows the creation of index.html objects to represent directory + // pages. + MainPageSuffix string + + // If the requested object path is missing, and any mainPageSuffix object is + // missing, if applicable, the service will return the named object from this + // bucket as the content for a 404 Not Found result. + NotFoundPage string +} + +func newBucket(b *raw.Bucket) (*BucketAttrs, error) { + if b == nil { + return nil, nil + } + rp, err := toRetentionPolicy(b.RetentionPolicy) + if err != nil { + return nil, err + } + return &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + DefaultEventBasedHold: b.DefaultEventBasedHold, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + ACL: toBucketACLRules(b.Acl), + DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), + Logging: toBucketLogging(b.Logging), + Website: toBucketWebsite(b.Website), + BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), + Etag: b.Etag, + LocationType: b.LocationType, + }, nil +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + // Copy label map. + var labels map[string]string + if len(b.Labels) > 0 { + labels = make(map[string]string, len(b.Labels)) + for k, v := range b.Labels { + labels[k] = v + } + } + // Ignore VersioningEnabled if it is false. This is OK because + // we only call this method when creating a bucket, and by default + // new buckets have versioning off. 
+ var v *raw.BucketVersioning + if b.VersioningEnabled { + v = &raw.BucketVersioning{Enabled: true} + } + var bb *raw.BucketBilling + if b.RequesterPays { + bb = &raw.BucketBilling{RequesterPays: true} + } + var bktIAM *raw.BucketIamConfiguration + if b.BucketPolicyOnly.Enabled { + bktIAM = &raw.BucketIamConfiguration{ + BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + Enabled: true, + }, + } + } + return &raw.Bucket{ + Name: b.Name, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: toRawBucketACL(b.ACL), + DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL), + Versioning: v, + Labels: labels, + Billing: bb, + Lifecycle: toRawLifecycle(b.Lifecycle), + RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), + Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), + Logging: b.Logging.toRawBucketLogging(), + Website: b.Website.toRawBucketWebsite(), + IamConfiguration: bktIAM, + } +} + +// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. +type CORS struct { + // MaxAge is the value to return in the Access-Control-Max-Age + // header used in preflight responses. + MaxAge time.Duration + + // Methods is the list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Methods []string + + // Origins is the list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origins []string + + // ResponseHeaders is the list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeaders []string +} + +// BucketEncryption is a bucket's encryption configuration. +type BucketEncryption struct { + // A Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt + // objects inserted into this bucket, if no encryption method is specified. + // The key's location must be the same as the bucket's. + DefaultKMSKeyName string +} + +// BucketAttrsToUpdate define the attributes to update during an Update call. +type BucketAttrsToUpdate struct { + // If set, updates whether the bucket uses versioning. + VersioningEnabled optional.Bool + + // If set, updates whether the bucket is a Requester Pays bucket. + RequesterPays optional.Bool + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. + DefaultEventBasedHold optional.Bool + + // BucketPolicyOnly configures access checks to use only bucket-level IAM + // policies. + BucketPolicyOnly *BucketPolicyOnly + + // If set, updates the retention policy of the bucket. Using + // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // If set, replaces the CORS configuration with a new configuration. + // An empty (rather than nil) slice causes all CORS policies to be removed. + CORS []CORS + + // If set, replaces the encryption configuration of the bucket. Using + // BucketEncryption.DefaultKMSKeyName = "" will delete the existing + // configuration. + Encryption *BucketEncryption + + // If set, replaces the lifecycle configuration of the bucket. 
+ Lifecycle *Lifecycle + + // If set, replaces the logging configuration of the bucket. + Logging *BucketLogging + + // If set, replaces the website configuration of the bucket. + Website *BucketWebsite + + // If not empty, applies a predefined set of access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch. + PredefinedDefaultObjectACL string + + setLabels map[string]string + deleteLabels map[string]bool +} + +// SetLabel causes a label to be added or modified when ua is used +// in a call to Bucket.Update. +func (ua *BucketAttrsToUpdate) SetLabel(name, value string) { + if ua.setLabels == nil { + ua.setLabels = map[string]string{} + } + ua.setLabels[name] = value +} + +// DeleteLabel causes a label to be deleted when ua is used in a +// call to Bucket.Update. +func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.DefaultEventBasedHold != nil { + rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.BucketPolicyOnly != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + Enabled: ua.BucketPolicyOnly.Enabled, + }, + } + } + if ua.Encryption != nil { + if ua.Encryption.DefaultKMSKeyName == "" { + rb.NullFields = append(rb.NullFields, "Encryption") + rb.Encryption = nil + } else { + rb.Encryption = ua.Encryption.toRawBucketEncryption() + } + } + if ua.Lifecycle != nil { + rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + } + if ua.Logging != nil { + if *ua.Logging == (BucketLogging{}) { + rb.NullFields = append(rb.NullFields, "Logging") + rb.Logging = nil + } else { + rb.Logging = ua.Logging.toRawBucketLogging() + } + } + if ua.Website != nil { + if *ua.Website == (BucketWebsite{}) { + rb.NullFields = append(rb.NullFields, "Website") + rb.Website = nil + } else { + rb.Website = ua.Website.toRawBucketWebsite() + } + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + rb.Acl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "Acl") + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. 
+ rb.DefaultObjectAcl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") + } + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + var metageneration int64 + if b.conds != nil { + metageneration = b.conds.MetagenerationMatch + } + req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) + _, err := req.Context(ctx).Do() + return err +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
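+//
+// In this file it is applied to *raw.BucketsGetCall, *raw.BucketsDeleteCall and
+// *raw.BucketsPatchCall values (see newGetCall, newDeleteCall and newPatchCall);
+// the IfMetagenerationMatch/IfMetagenerationNotMatch setters are located by name
+// through setConditionField, so no shared interface is needed.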
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error { + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + cval := reflect.ValueOf(call) + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy { + if rp == nil { + return nil + } + return &raw.BucketRetentionPolicy{ + RetentionPeriod: int64(rp.RetentionPeriod / time.Second), + } +} + +func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) { + if rp == nil { + return nil, nil + } + t, err := time.Parse(time.RFC3339, rp.EffectiveTime) + if err != nil { + return nil, err + } + return &RetentionPolicy{ + RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second, + EffectiveTime: t, + IsLocked: rp.IsLocked, + }, nil +} + +func toRawCORS(c []CORS) []*raw.BucketCors { + var out []*raw.BucketCors + for _, v := range c { + out = append(out, &raw.BucketCors{ + MaxAgeSeconds: int64(v.MaxAge / time.Second), + Method: v.Methods, + Origin: v.Origins, + ResponseHeader: v.ResponseHeaders, + }) + } + return out +} + +func toCORS(rc []*raw.BucketCors) []CORS { + var out []CORS + for _, v := range rc { + out = append(out, CORS{ + MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second, + Methods: v.Method, + Origins: v.Origin, + ResponseHeaders: v.ResponseHeader, + }) + } + return out +} + +func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { + var rl raw.BucketLifecycle + if len(l.Rules) == 0 { + return nil + } + for _, r := range l.Rules { + rr := &raw.BucketLifecycleRule{ + Action: &raw.BucketLifecycleRuleAction{ + Type: r.Action.Type, + StorageClass: r.Action.StorageClass, + }, + Condition: &raw.BucketLifecycleRuleCondition{ + Age: r.Condition.AgeInDays, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, + }, + } + + switch r.Condition.Liveness { + case LiveAndArchived: + rr.Condition.IsLive = nil + case Live: + rr.Condition.IsLive = googleapi.Bool(true) + case Archived: + rr.Condition.IsLive = googleapi.Bool(false) + } + + if !r.Condition.CreatedBefore.IsZero() { + rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) + } + rl.Rule = append(rl.Rule, rr) + } + return &rl +} + +func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { + var l Lifecycle + if rl == nil { + return l + } + for _, rr := range rl.Rule { + r := LifecycleRule{ + Action: LifecycleAction{ + Type: rr.Action.Type, + StorageClass: rr.Action.StorageClass, + }, + Condition: LifecycleCondition{ + AgeInDays: rr.Condition.Age, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, + }, + } + + switch { + case rr.Condition.IsLive == nil: + r.Condition.Liveness = LiveAndArchived + case *rr.Condition.IsLive == true: + r.Condition.Liveness = Live + case *rr.Condition.IsLive == false: + r.Condition.Liveness = Archived + } + + if rr.Condition.CreatedBefore != "" { + r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) + } + l.Rules = append(l.Rules, r) 
+ } + return l +} + +func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { + if e == nil { + return nil + } + return &raw.BucketEncryption{ + DefaultKmsKeyName: e.DefaultKMSKeyName, + } +} + +func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} +} + +func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { + if b == nil { + return nil + } + return &raw.BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func toBucketLogging(b *raw.BucketLogging) *BucketLogging { + if b == nil { + return nil + } + return &BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { + if w == nil { + return nil + } + return &raw.BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { + if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { + return BucketPolicyOnly{} + } + lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) + if err != nil { + return BucketPolicyOnly{ + Enabled: true, + } + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: lt, + } +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. 
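+//
+// A typical loop, as a sketch (bkt is a placeholder for a BucketHandle):
+//
+//	it := bkt.Objects(ctx, nil)
+//	for {
+//		attrs, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		fmt.Println(attrs.Name)
+//	}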
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
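+//
+// As a sketch, the iterator package's Pager can be used for page-at-a-time
+// listing (projectID is a placeholder):
+//
+//	it := client.Buckets(ctx, projectID)
+//	pager := iterator.NewPager(it, 50, "")
+//	var page []*BucketAttrs
+//	nextPageToken, err := pager.NextPage(&page)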
+func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 000000000..52162e72d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,228 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. 
+ ProgressFunc func(copiedBytes, totalBytes uint64) + + // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, + // that will be used to encrypt the object. Overrides the object's KMSKeyName, if + // any. + // + // Providing both a DestinationKMSKeyName and a customer-supplied encryption key + // (via ObjectHandle.Key) on the destination object will result in an error when + // Run is called. + DestinationKMSKeyName string + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { + return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } +} + +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if c.DestinationKMSKeyName != "" { + call.DestinationKmsKeyName(c.DestinationKMSKeyName) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. 
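+//
+// A minimal sketch (bkt and the object names are placeholders); note that all
+// sources must be in the destination's bucket:
+//
+//	composer := bkt.Object("combined").ComposerFrom(bkt.Object("part1"), bkt.Object("part2"))
+//	attrs, err := composer.Run(ctx)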
+// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. +func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 000000000..88f645904 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,176 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +See https://godoc.org/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. 
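+
+As a sketch of one way to bound how long a single call (including the retries
+described in the next paragraph) may take, derive a context with a deadline;
+client and bucketName are placeholders for the values created in the examples
+below:
+
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	attrs, err := client.Bucket(bucketName).Attrs(ctx)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Println(attrs.Location)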
+ +All of the methods of this package use exponential backoff to retry calls that fail +with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues +indefinitely unless the controlling context is canceled or the client is closed. See +context.WithTimeout and context.WithCancel. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +The client will use your default application credentials. + +If you only wish to access public data, you can create +an unauthenticated client with + + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. To create a bucket in Google Cloud Storage, +call Create on the handle: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +BucketAttrs. The third argument to BucketHandle.Create allows you to set +the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use +Attrs: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets, but unlike buckets +you don't explicitly create an object. Instead, the first time you write +to an object it will be created. You can use the standard Go io.Reader +and io.Writer interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with Attrs: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see +https://cloud.google.com/storage/docs/access-control/iam). + +To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. 
+ } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. Conditions let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +Errors + +Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). +These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: + + if e, ok := err.(*googleapi.Error); ok { + if e.Code == 409 { ... } + } +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go new file mode 100644 index 000000000..206813f0c --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -0,0 +1,32 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.10 + +package storage + +import "google.golang.org/api/googleapi" + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go new file mode 100644 index 000000000..4a5c1b512 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -0,0 +1,330 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"google.golang.org/api/iterator"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// HMACState is the state of the HMAC key.
+type HMACState string
+
+const (
+	// Active is the status for an active key that can be used to sign
+	// requests.
+	Active HMACState = "ACTIVE"
+
+	// Inactive is the status for an inactive key; requests signed by
+	// this key will be denied.
+	Inactive HMACState = "INACTIVE"
+
+	// Deleted is the status for a key that is deleted.
+	// Once in this state the key cannot be recovered
+	// and does not count towards key limits. Deleted keys will be cleaned
+	// up later.
+	Deleted HMACState = "DELETED"
+)
+
+// HMACKey is the representation of a Google Cloud Storage HMAC key.
+//
+// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
+// authentication, please visit https://cloud.google.com/storage/docs/migrating.
+//
+// This type is EXPERIMENTAL and subject to change or removal without notice.
+type HMACKey struct {
+	// The HMAC's secret key.
+	Secret string
+
+	// AccessID is the ID of the HMAC key.
+	AccessID string
+
+	// Etag is the HTTP/1.1 Entity tag.
+	Etag string
+
+	// ID is the ID of the HMAC key, including the ProjectID and AccessID.
+	ID string
+
+	// ProjectID is the ID of the project that owns the
+	// service account to which the key authenticates.
+	ProjectID string
+
+	// ServiceAccountEmail is the email address
+	// of the key's associated service account.
+	ServiceAccountEmail string
+
+	// CreatedTime is the creation time of the HMAC key.
+	CreatedTime time.Time
+
+	// UpdatedTime is the last modification time of the HMAC key metadata.
+	UpdatedTime time.Time
+
+	// State is the state of the HMAC key.
+	// It can be one of Active, Inactive or Deleted.
+	State HMACState
+}
+
+// HMACKeyHandle helps provide access and management for HMAC keys.
+//
+// This type is EXPERIMENTAL and subject to change or removal without notice.
+type HMACKeyHandle struct {
+	projectID string
+	accessID  string
+
+	raw *raw.ProjectsHmacKeysService
+}
+
+// HMACKeyHandle creates a handle that will be used for HMACKey operations.
+//
+// This method is EXPERIMENTAL and subject to change or removal without notice.
+func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
+	return &HMACKeyHandle{
+		projectID: projectID,
+		accessID:  accessID,
+		raw:       raw.NewProjectsHmacKeysService(c.raw),
+	}
+}
+
+// Get invokes an RPC to retrieve the HMAC key referenced by the
+// HMACKeyHandle's accessID.
+//
+// This method is EXPERIMENTAL and subject to change or removal without notice.
+func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) {
+	call := hkh.raw.Get(hkh.projectID, hkh.accessID)
+	setClientHeader(call.Header())
+
+	var metadata *raw.HmacKeyMetadata
+	var err error
+	err = runWithRetry(ctx, func() error {
+		metadata, err = call.Context(ctx).Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	hkPb := &raw.HmacKey{
+		Metadata: metadata,
+	}
+	return pbHmacKeyToHMACKey(hkPb, false)
+}
+
+// Delete invokes an RPC to delete the key referenced by accessID on Google Cloud Storage.
+// Only inactive HMAC keys can be deleted.
+// After deletion, a key cannot be used to authenticate requests.
+//
+// This method is EXPERIMENTAL and subject to change or removal without notice.
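+//
+// A minimal usage sketch, assuming an existing client; projectID and accessID
+// are placeholders. The key is first set to Inactive, since only inactive keys
+// can be deleted:
+//
+//	hkh := client.HMACKeyHandle(projectID, accessID)
+//	if _, err := hkh.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
+//		// TODO: Handle error.
+//	}
+//	if err := hkh.Delete(ctx); err != nil {
+//		// TODO: Handle error.
+//	}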
+func (hkh *HMACKeyHandle) Delete(ctx context.Context) error { + delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) + setClientHeader(delCall.Header()) + + return runWithRetry(ctx, func() error { + return delCall.Context(ctx).Do() + }) +} + +func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) { + pbmd := pb.Metadata + if pbmd == nil { + return nil, errors.New("field Metadata cannot be nil") + } + createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated) + if err != nil { + return nil, fmt.Errorf("field CreatedTime: %v", err) + } + updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated) + if err != nil && !updatedTimeCanBeNil { + return nil, fmt.Errorf("field UpdatedTime: %v", err) + } + + hmk := &HMACKey{ + AccessID: pbmd.AccessId, + Secret: pb.Secret, + Etag: pbmd.Etag, + ID: pbmd.Id, + State: HMACState(pbmd.State), + ProjectID: pbmd.ProjectId, + CreatedTime: createdTime, + UpdatedTime: updatedTime, + + ServiceAccountEmail: pbmd.ServiceAccountEmail, + } + + return hmk, nil +} + +// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. +// +// This method is EXPERIMENTAL and subject to change or removal without notice. +func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) { + if projectID == "" { + return nil, errors.New("storage: expecting a non-blank projectID") + } + if serviceAccountEmail == "" { + return nil, errors.New("storage: expecting a non-blank service account email") + } + + svc := raw.NewProjectsHmacKeysService(c.raw) + call := svc.Create(projectID, serviceAccountEmail) + setClientHeader(call.Header()) + + var hkPb *raw.HmacKey + var err error + err = runWithRetry(ctx, func() error { + hkPb, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + + return pbHmacKeyToHMACKey(hkPb, true) +} + +// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. +// +// This type is EXPERIMENTAL and subject to change or removal without notice. +type HMACKeyAttrsToUpdate struct { + // State is required and must be either StateActive or StateInactive. + State HMACState + + // Etag is an optional field and it is the HTTP/1.1 Entity tag. + Etag string +} + +// Update mutates the HMACKey referred to by accessID. +// +// This method is EXPERIMENTAL and subject to change or removal without notice. +func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) { + if au.State != Active && au.State != Inactive { + return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) + } + + call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{ + Etag: au.Etag, + State: string(au.State), + }) + setClientHeader(call.Header()) + + var metadata *raw.HmacKeyMetadata + var err error + err = runWithRetry(ctx, func() error { + metadata, err = call.Context(ctx).Do() + return err + }) + + if err != nil { + return nil, err + } + hkPb := &raw.HmacKey{ + Metadata: metadata, + } + return pbHmacKeyToHMACKey(hkPb, false) +} + +// An HMACKeysIterator is an iterator over HMACKeys. +// +// This type is EXPERIMENTAL and subject to change or removal without notice. +type HMACKeysIterator struct { + ctx context.Context + raw *raw.ProjectsHmacKeysService + projectID string + hmacKeys []*HMACKey + pageInfo *iterator.PageInfo + nextFunc func() error + index int +} + +// ListHMACKeys returns an iterator for listing HMACKeys. 
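+//
+// A minimal sketch of iterating over the results, assuming an existing client;
+// projectID is a placeholder:
+//
+//	it := client.ListHMACKeys(ctx, projectID)
+//	for {
+//		key, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		fmt.Println(key.AccessID, key.State)
+//	}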
+// +// This method is EXPERIMENTAL and subject to change or removal without notice. +func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIterator { + it := &HMACKeysIterator{ + ctx: ctx, + raw: raw.NewProjectsHmacKeysService(c.raw), + projectID: projectID, + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.hmacKeys) - it.index }, + func() interface{} { + prev := it.hmacKeys + it.hmacKeys = it.hmacKeys[:0] + it.index = 0 + return prev + }) + return it +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// This method is EXPERIMENTAL and subject to change or removal without notice. +func (it *HMACKeysIterator) Next() (*HMACKey, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + + key := it.hmacKeys[it.index] + it.index++ + + return key, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// This method is EXPERIMENTAL and subject to change or removal without notice. +func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { + call := it.raw.List(it.projectID) + setClientHeader(call.Header()) + call = call.PageToken(pageToken) + // By default we'll also show deleted keys and then + // let users filter on their own. + call = call.ShowDeletedKeys(true) + if pageSize > 0 { + call = call.MaxResults(int64(pageSize)) + } + + ctx := it.ctx + var resp *raw.HmacKeysMetadata + err = runWithRetry(it.ctx, func() error { + resp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return "", err + } + + for _, metadata := range resp.Items { + hkPb := &raw.HmacKey{ + Metadata: metadata, + } + hkey, err := pbHmacKeyToHMACKey(hkPb, true) + if err != nil { + return "", err + } + it.hmacKeys = append(it.hmacKeys, hkey) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 000000000..9d9360671 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,130 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) +} + +// iamClient implements the iam.client interface. 
+type iamClient struct { + raw *raw.Service + userProject string +} + +func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var rp *raw.Policy + err = runWithRetry(ctx, func() error { + rp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + + rp := iamToStoragePolicy(p) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + return runWithRetry(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var res *raw.TestIamPermissionsResponse + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 000000000..e755f197d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,37 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "context" + + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go/v2" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + if shouldRetry(err) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go new file mode 100644 index 000000000..66fa45bea --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go110.go @@ -0,0 +1,42 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.10 + +package storage + +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry on REFUSED_STREAM. + // Unfortunately the error type is unexported, so we resort to string + // matching. + return strings.Contains(e.Error(), "REFUSED_STREAM") + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 000000000..84619b6d5 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,188 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "regexp" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. +type Notification struct { + //The ID of the notification. + ID string + + // The ID of the topic to which this subscription publishes. + TopicID string + + // The ID of the project to which the topic belongs. + TopicProjectID string + + // Only send notifications about listed event types. If empty, send notifications + // for all event types. + // See https://cloud.google.com/storage/docs/pubsub-notifications#events. 
+	EventTypes []string
+
+	// If present, only apply this notification configuration to object names that
+	// begin with this prefix.
+	ObjectNamePrefix string
+
+	// An optional list of additional attributes to attach to each Cloud PubSub
+	// message published for this notification subscription.
+	CustomAttributes map[string]string
+
+	// The contents of the message payload.
+	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
+	PayloadFormat string
+}
+
+// Values for Notification.PayloadFormat.
+const (
+	// Send no payload with notification messages.
+	NoPayload = "NONE"
+
+	// Send object metadata as JSON with notification messages.
+	JSONPayload = "JSON_API_V1"
+)
+
+// Values for Notification.EventTypes.
+const (
+	// Event that occurs when an object is successfully created.
+	ObjectFinalizeEvent = "OBJECT_FINALIZE"
+
+	// Event that occurs when the metadata of an existing object changes.
+	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
+
+	// Event that occurs when an object is permanently deleted.
+	ObjectDeleteEvent = "OBJECT_DELETE"
+
+	// Event that occurs when the live version of an object becomes an
+	// archived version.
+	ObjectArchiveEvent = "OBJECT_ARCHIVE"
+)
+
+func toNotification(rn *raw.Notification) *Notification {
+	n := &Notification{
+		ID:               rn.Id,
+		EventTypes:       rn.EventTypes,
+		ObjectNamePrefix: rn.ObjectNamePrefix,
+		CustomAttributes: rn.CustomAttributes,
+		PayloadFormat:    rn.PayloadFormat,
+	}
+	n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
+	return n
+}
+
+var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+
+// parseNotificationTopic extracts the project and topic IDs from the full
+// resource name returned by the service. If the name is malformed, it returns
+// "?" for both IDs.
+func parseNotificationTopic(nt string) (projectID, topicID string) {
+	matches := topicRE.FindStringSubmatch(nt)
+	if matches == nil {
+		return "?", "?"
+	}
+	return matches[1], matches[2]
+}
+
+func toRawNotification(n *Notification) *raw.Notification {
+	return &raw.Notification{
+		Id: n.ID,
+		Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
+			n.TopicProjectID, n.TopicID),
+		EventTypes:       n.EventTypes,
+		ObjectNamePrefix: n.ObjectNamePrefix,
+		CustomAttributes: n.CustomAttributes,
+		PayloadFormat:    string(n.PayloadFormat),
+	}
+}
+
+// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
+// and PayloadFormat, and must not set its ID. The other fields are all optional. The
+// returned Notification's ID can be used to refer to it.
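+//
+// A minimal sketch, assuming an existing client; the bucket, project and topic
+// names are placeholders:
+//
+//	n, err := client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
+//		TopicProjectID: "my-project",
+//		TopicID:        "my-topic",
+//		PayloadFormat:  storage.JSONPayload,
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	fmt.Println(n.ID)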
+func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") + defer func() { trace.EndSpan(ctx, err) }() + + if n.ID != "" { + return nil, errors.New("storage: AddNotification: ID must not be set") + } + if n.TopicProjectID == "" { + return nil, errors.New("storage: AddNotification: missing TopicProjectID") + } + if n.TopicID == "" { + return nil, errors.New("storage: AddNotification: missing TopicID") + } + call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + rn, err := call.Context(ctx).Do() + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +// Notifications returns all the Notifications configured for this bucket, as a map +// indexed by notification ID. +func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.List(b.name) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + var res *raw.Notifications + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 000000000..5c83651bd --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,403 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// ReaderObjectAttrs are attributes about the object being read. These are populated +// during the New call. This struct only holds a subset of object attributes: to +// get the full set of attributes, use ObjectHandle.Attrs. 
+// +// Each field is read-only. +type ReaderObjectAttrs struct { + // Size is the length of the object's content. + Size int64 + + // StartOffset is the byte offset within the object + // from which reading begins. + // This value is only non-zero for range requests. + StartOffset int64 + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // CacheControl specifies whether and for how long browser and Internet + // caches are allowed to cache your objects. + CacheControl string + + // LastModified is the time that the object was last modified. + LastModified time.Time + + // Generation is the generation number of the object's content. + Generation int64 + + // Metageneration is the version of the metadata for this object at + // this generation. This field is used for preconditions and for + // detecting changes in metadata. A metageneration number is only + // meaningful in the context of a particular generation of a + // particular object. + Metageneration int64 +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. If offset is negative, the object is read abs(offset) bytes +// from the end, and length must also be negative to indicate all remaining +// bytes will be read. +func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 && length >= 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: o.c.scheme, + Host: o.c.readHost, + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + + gen := o.gen + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + start := offset + seen + if length < 0 && start < 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. 
+ req.URL.RawQuery = conditionsQuery(gen, o.conds) + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err + } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + gen = gen64 + } + return nil + }) + if err != nil { + return nil, err + } + return res, nil + } + + res, err := reopen(0) + if err != nil { + return nil, err + } + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. + ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. 
+ if length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + if length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Generation") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + body: body, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reopen: reopen, + }, nil +} + +func uncompressedByServer(res *http.Response) bool { + // If the data is stored as gzip but is not encoded as gzip, then it + // was uncompressed by the server. + return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && + res.Header.Get("Content-Encoding") != "gzip" +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +// +// Typically, a Reader computes the CRC of the downloaded content and compares it to +// the stored CRC, returning an error from Read if there is a mismatch. This integrity check +// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. +type Reader struct { + Attrs ReaderObjectAttrs + body io.ReadCloser + seen, remain, size int64 + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc + reopen func(seen int64) (*http.Response, error) +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.readWithRetry(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if err == io.EOF { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + } + return n, err +} + +func (r *Reader) readWithRetry(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if !shouldRetryRead(err) { + return n, err + } + // Read failed, but we will try again. Send a ranged read request that takes + // into account the number of bytes we've already seen. 
+ res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func shouldRetryRead(err error) bool { + if err == nil { + return false + } + return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +// +// Deprecated: use Reader.Attrs.Size. +func (r *Reader) Size() int64 { + return r.Attrs.Size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +// +// Deprecated: use Reader.Attrs.ContentType. +func (r *Reader) ContentType() string { + return r.Attrs.ContentType +} + +// ContentEncoding returns the content encoding of the object. +// +// Deprecated: use Reader.Attrs.ContentEncoding. +func (r *Reader) ContentEncoding() string { + return r.Attrs.ContentEncoding +} + +// CacheControl returns the cache control of the object. +// +// Deprecated: use Reader.Attrs.CacheControl. +func (r *Reader) CacheControl() string { + return r.Attrs.CacheControl +} + +// LastModified returns the value of the Last-Modified header. +// +// Deprecated: use Reader.Attrs.LastModified. +func (r *Reader) LastModified() (time.Time, error) { + return r.Attrs.LastModified, nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 000000000..d35bd7568 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1369 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/internal/version" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" + htransport "google.golang.org/api/transport/http" +) + +var ( + // ErrBucketNotExist indicates that the bucket does not exist. + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + // ErrObjectNotExist indicates that the object does not exist. + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. 
+ ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service + // Scheme describes the scheme under the current host. + scheme string + // EnvHost is the host set on the STORAGE_EMULATOR_HOST variable. + envHost string + // ReadHost is the default host used on the reader. + readHost string +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + scheme := "https" + var host, readHost string + if host = os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { + scheme = "http" + readHost = host + } else { + readHost = "storage.googleapis.com" + } + return &Client{ + hc: hc, + raw: rawService, + scheme: scheme, + envHost: host, + readHost: readHost, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + // Set fields to nil so that subsequent uses will panic. + c.hc = nil + c.raw = nil + return nil +} + +// SigningScheme determines the API version to use when signing URLs. +type SigningScheme int + +const ( + // SigningSchemeDefault is presently V2 and will change to V4 in the future. + SigningSchemeDefault SigningScheme = iota + + // SigningSchemeV2 uses the V2 scheme to sign URLs. + SigningSchemeV2 + + // SigningSchemeV4 uses the V4 scheme to sign URLs. + SigningSchemeV4 +) + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. 
+ PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. For SigningSchemeV4, the expiration may be no + // more than seven days in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string + + // Scheme determines the version of URL signing to use. Default is + // SigningSchemeV2. + Scheme SigningScheme +} + +var ( + tabRegex = regexp.MustCompile(`[\t]+`) + // I was tempted to call this spacex. :) + spaceRegex = regexp.MustCompile(` +`) + + canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) + excludedCanonicalHeaders = map[string]bool{ + "x-goog-encryption-key": true, + "x-goog-encryption-key-sha256": true, + } +) + +// v2SanitizeHeaders applies the specifications for canonical extension headers at +// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +func v2SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var header, value string + // Only keep canonical headers, discard any others. + headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) + if len(headerMatches) == 0 { + continue + } + header = headerMatches[1] + value = headerMatches[2] + + header = strings.ToLower(strings.TrimSpace(header)) + value = strings.TrimSpace(value) + + if excludedCanonicalHeaders[header] { + // Do not keep any deliberately excluded canonical headers when signing. + continue + } + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[header] = append(headerMap[header], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. 
+ // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + sort.Strings(sanitizedHeaders) + return sanitizedHeaders +} + +// v4SanitizeHeaders applies the specifications for canonical extension headers +// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// +// V4 does a couple things differently from V2: +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. +func v4SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var key, value string + headerMatches := strings.Split(sanitizedHeader, ":") + if len(headerMatches) < 2 { + continue + } + + key = headerMatches[0] + value = headerMatches[1] + + key = strings.ToLower(strings.TrimSpace(key)) + value = strings.TrimSpace(value) + value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" "))) + value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t"))) + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[key] = append(headerMap[key], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + return sanitizedHeaders +} + +// SignedURL returns a URL for the specified object. Signed URLs allow +// the users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. 
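+//
+// A minimal V4 signing sketch; the key file path, access ID and bucket/object
+// names are placeholders:
+//
+//	pkey, err := ioutil.ReadFile("service-account.pem")
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pkey,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(15 * time.Minute),
+//		Scheme:         storage.SigningSchemeV4,
+//	})
+//	if err != nil {
+//		// TODO: Handle error.
+//	}
+//	fmt.Println(url)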
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + now := utcNow() + if err := validateOptions(opts, now); err != nil { + return "", err + } + + switch opts.Scheme { + case SigningSchemeV2: + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, name, opts) + case SigningSchemeV4: + opts.Headers = v4SanitizeHeaders(opts.Headers) + return signedURLV4(bucket, name, opts, now) + default: // SigningSchemeDefault + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, name, opts) + } +} + +func validateOptions(opts *SignedURLOptions, now time.Time) error { + if opts == nil { + return errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return errors.New("storage: missing required expires option") + } + if opts.MD5 != "" { + md5, err := base64.StdEncoding.DecodeString(opts.MD5) + if err != nil || len(md5) != 16 { + return errors.New("storage: invalid MD5 checksum") + } + } + if opts.Scheme == SigningSchemeV4 { + cutoff := now.Add(604801 * time.Second) // 7 days + 1 second + if !opts.Expires.Before(cutoff) { + return errors.New("storage: expires must be within seven days from now") + } + } + return nil +} + +const ( + iso8601 = "20060102T150405Z" + yearMonthDay = "20060102" +) + +// utcNow returns the current time in UTC and is a variable to allow for +// reassignment in tests to provide deterministic signed URL values. +var utcNow = func() time.Time { + return time.Now().UTC() +} + +// extractHeaderNames takes in a series of key:value headers and returns the +// header names only. +func extractHeaderNames(kvs []string) []string { + var res []string + for _, header := range kvs { + nameValue := strings.Split(header, ":") + res = append(res, nameValue[0]) + } + return res +} + +// signedURLV4 creates a signed URL using the sigV4 algorithm. +func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + u := &url.URL{Path: bucket} + if name != "" { + u.Path += "/" + name + } + + // Note: we have to add a / here because GCS does so auto-magically, despite + // Go's EscapedPath not doing so (and we have to exactly match their + // canonical query). 
+ fmt.Fprintf(buf, "/%s\n", u.EscapedPath()) + + headerNames := append(extractHeaderNames(opts.Headers), "host") + if opts.ContentType != "" { + headerNames = append(headerNames, "content-type") + } + if opts.MD5 != "" { + headerNames = append(headerNames, "content-md5") + } + sort.Strings(headerNames) + signedHeaders := strings.Join(headerNames, ";") + timestamp := now.Format(iso8601) + credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) + canonicalQueryString := url.Values{ + "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, + "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, + "X-Goog-Date": {timestamp}, + "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, + "X-Goog-SignedHeaders": {signedHeaders}, + } + fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + + u.Host = "storage.googleapis.com" + + var headersWithValue []string + headersWithValue = append(headersWithValue, "host:"+u.Host) + headersWithValue = append(headersWithValue, opts.Headers...) + if opts.ContentType != "" { + headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType)) + } + if opts.MD5 != "" { + headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5)) + } + canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n") + fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) + fmt.Fprintf(buf, "%s\n", signedHeaders) + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + + sum := sha256.Sum256(buf.Bytes()) + hexDigest := hex.EncodeToString(sum[:]) + signBuf := &bytes.Buffer{} + fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") + fmt.Fprintf(signBuf, "%s\n", timestamp) + fmt.Fprintf(signBuf, "%s\n", credentialScope) + fmt.Fprintf(signBuf, "%s", hexDigest) + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + b, err := signBytes(signBuf.Bytes()) + if err != nil { + return "", err + } + signature := hex.EncodeToString(b) + canonicalQueryString.Set("X-Goog-Signature", string(signature)) + u.Scheme = "https" + u.RawQuery = canonicalQueryString.Encode() + return u.String(), nil +} + +// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header +// key. 
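+// For example, ["x-goog-meta-b:2", "host:example.com"] is returned as
+// ["host:example.com", "x-goog-meta-b:2"].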
+func sortHeadersByKey(hdrs []string) []string { + headersMap := map[string]string{} + var headersKeys []string + for _, h := range hdrs { + parts := strings.Split(h, ":") + k := parts[0] + v := parts[1] + headersMap[k] = v + headersKeys = append(headersKeys, k) + } + sort.Strings(headersKeys) + var sorted []string + for _, k := range headersKeys { + v := headersMap[k] + sorted = append(sorted, fmt.Sprintf("%s:%s", k, v)) + } + return sorted +} + +func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + if len(opts.Headers) > 0 { + fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) + } + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key + userProject string // for requester-pays buckets + readCompressed bool // Accept-Encoding: gzip +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. 
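+//
+// A minimal sketch; the bucket and object names are placeholders, and the same
+// 32-byte key must be supplied again to read the object back:
+//
+//	// Generate a random AES-256 key with crypto/rand.
+//	key := make([]byte, 32)
+//	if _, err := rand.Read(key); err != nil {
+//		// TODO: Handle error.
+//	}
+//	w := client.Bucket("my-bucket").Object("secret-object").Key(key).NewWriter(ctx)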
+func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. 
+ nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// BucketName returns the name of the bucket. +func (o *ObjectHandle) BucketName() string { + return o.bucket +} + +// ObjectName returns the name of the object. +func (o *ObjectHandle) ObjectName() string { + return o.object +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + EventBasedHold optional.Bool + TemporaryHold optional.Bool + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. ACL must be nil. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. + PredefinedACL string +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// ReadCompressed when true causes the read to happen without decompressing. +func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { + o2 := *o + o2.readCompressed = compressed + return &o2 +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. 
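+//
+// A minimal usage sketch; the client variable, ctx, and the names below are
+// hypothetical:
+//
+//	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
+//	w.ContentType = "text/plain"
+//	if _, err := w.Write([]byte("hello world")); err != nil {
+//		// handle error
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle error
+//	}
+//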
+// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// It is the caller's responsibility to call Close when writing is done. To +// stop writing without saving the data, cancel the context. +func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + return &Writer{ + ctx: ctx, + o: o, + donec: make(chan struct{}), + ObjectAttrs: ObjectAttrs{Name: o.object}, + ChunkSize: googleapi.DefaultUploadChunkSize, + } +} + +func (o *ObjectHandle) validate() error { + if o.bucket == "" { + return errors.New("storage: bucket name is empty") + } + if o.object == "" { + return errors.New("storage: object name is empty") + } + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + return nil +} + +// parseKey converts the binary contents of a private key file to an +// *rsa.PrivateKey. It detects whether the private key is in a PEM container or +// not. If so, it extracts the private key from PEM container before +// conversion. It only supports PEM containers with no passphrase. +func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} + +// toRawObject copies the editable attributes from o to the raw library's Object type. +func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { + var ret string + if !o.RetentionExpirationTime.IsZero() { + ret = o.RetentionExpirationTime.Format(time.RFC3339) + } + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + EventBasedHold: o.EventBasedHold, + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: ret, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toRawObjectACL(o.ACL), + Metadata: o.Metadata, + } +} + +// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. +type ObjectAttrs struct { + // Bucket is the name of the bucket containing this GCS object. + // This field is read-only. + Bucket string + + // Name is the name of the object within the bucket. + // This field is read-only. + Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // EventBasedHold specifies whether an object is under event-based hold. New + // objects created in a bucket whose DefaultEventBasedHold is set will + // default to that value. + EventBasedHold bool + + // TemporaryHold specifies whether an object is under temporary hold. While + // this flag is set to true, the object is protected against deletion and + // overwrites. 
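+	// To release a temporary hold, update the object with
+	// ObjectAttrsToUpdate{TemporaryHold: false}.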
+ TemporaryHold bool + + // RetentionExpirationTime is a server-determined value that specifies the + // earliest time that the object's retention period expires. + // This is a read-only field. + RetentionExpirationTime time.Time + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. It should be set + // only when writing, copying or composing an object. When copying or composing, + // it acts as the destinationPredefinedAcl parameter. + // PredefinedACL is always empty for ObjectAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/insert + // for valid values. + PredefinedACL string + + // Owner is the owner of the object. This field is read-only. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. This field is read-only. + Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only, + // except when used from a Writer. If set on a Writer, the uploaded + // data is rejected if its MD5 hash does not match this field. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. This field is read-only, except when + // used from a Writer. If set on a Writer and Writer.SendCRC32C + // is true, the uploaded data is rejected if its CRC32c hash does not + // match this field. + CRC32C uint32 + + // MediaLink is an URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // Metageneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. + Metageneration int64 + + // StorageClass is the storage class of the object. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" + // and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" + // or "REGIONAL" depending on the bucket's location settings. + StorageClass string + + // Created is the time the object was created. This field is read-only. + Created time.Time + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. This field is read-only. + Deleted time.Time + + // Updated is the creation or modification time of the object. + // For buckets with versioning enabled, changing an object's + // metadata does not change this property. This field is read-only. + Updated time.Time + + // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the + // customer-supplied encryption key for the object. It is empty if there is + // no customer-supplied encryption key. 
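+	// The hash can be used to tell which key an object was written with,
+	// without exposing the key itself.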
+	// See https://cloud.google.com/storage/docs/encryption for more about
+	// encryption in Google Cloud Storage.
+	CustomerKeySHA256 string
+
+	// Cloud KMS key name, in the form
+	// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
+	// if the object is encrypted by such a key.
+	//
+	// Providing both a KMSKeyName and a customer-supplied encryption key (via
+	// ObjectHandle.Key) will result in an error when writing an object.
+	KMSKeyName string
+
+	// Prefix is set only for ObjectAttrs which represent synthetic "directory
+	// entries" when iterating over buckets using Query.Delimiter. See
+	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
+	// populated.
+	Prefix string
+
+	// Etag is the HTTP/1.1 Entity tag for the object.
+	// This field is read-only.
+	Etag string
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time {
+	var r time.Time
+	if t != "" {
+		r, _ = time.Parse(time.RFC3339, t)
+	}
+	return r
+}
+
+func newObject(o *raw.Object) *ObjectAttrs {
+	if o == nil {
+		return nil
+	}
+	owner := ""
+	if o.Owner != nil {
+		owner = o.Owner.Entity
+	}
+	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+	crc32c, _ := decodeUint32(o.Crc32c)
+	var sha256 string
+	if o.CustomerEncryption != nil {
+		sha256 = o.CustomerEncryption.KeySha256
+	}
+	return &ObjectAttrs{
+		Bucket:                  o.Bucket,
+		Name:                    o.Name,
+		ContentType:             o.ContentType,
+		ContentLanguage:         o.ContentLanguage,
+		CacheControl:            o.CacheControl,
+		EventBasedHold:          o.EventBasedHold,
+		TemporaryHold:           o.TemporaryHold,
+		RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
+		ACL:                     toObjectACLRules(o.Acl),
+		Owner:                   owner,
+		ContentEncoding:         o.ContentEncoding,
+		ContentDisposition:      o.ContentDisposition,
+		Size:                    int64(o.Size),
+		MD5:                     md5,
+		CRC32C:                  crc32c,
+		MediaLink:               o.MediaLink,
+		Metadata:                o.Metadata,
+		Generation:              o.Generation,
+		Metageneration:          o.Metageneration,
+		StorageClass:            o.StorageClass,
+		CustomerKeySHA256:       sha256,
+		KMSKeyName:              o.KmsKeyName,
+		Created:                 convertTime(o.TimeCreated),
+		Deleted:                 convertTime(o.TimeDeleted),
+		Updated:                 convertTime(o.Updated),
+		Etag:                    o.Etag,
+	}
+}
+
+// Decode a uint32 encoded in Base64 in big-endian byte order.
+func decodeUint32(b64 string) (uint32, error) {
+	d, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return 0, err
+	}
+	if len(d) != 4 {
+		return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
+	}
+	return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
+}
+
+// Encode a uint32 as Base64 in big-endian byte order.
+func encodeUint32(u uint32) string {
+	b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
+	return base64.StdEncoding.EncodeToString(b)
+}
+
+// Query represents a query to filter objects from a bucket.
+type Query struct {
+	// Delimiter returns results in a directory-like fashion.
+	// Results will contain only objects whose names, aside from the
+	// prefix, do not contain delimiter. Objects whose names,
+	// aside from the prefix, contain delimiter will have their name,
+	// truncated after the delimiter, returned in prefixes.
+	// Duplicate prefixes are omitted.
+	// Optional.
+	Delimiter string
+
+	// Prefix is the prefix filter to query objects
+	// whose names begin with this prefix.
+	// Optional.
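+	//
+	// For example, to iterate only over objects under the (hypothetical)
+	// prefix "dir/" without descending into sub-"directories":
+	//
+	//	it := client.Bucket("my-bucket").Objects(ctx, &Query{Prefix: "dir/", Delimiter: "/"})
+	//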
+ Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// Conditions constrain methods to act on specific generations of +// objects. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. + // Use DoesNotExist to specify that the object does not exist in the bucket. + GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
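+// Reflection (via setConditionField) is used because each generated call type
+// defines its own precondition setter methods rather than implementing a
+// shared interface. Callers normally reach this path through ObjectHandle.If,
+// for example (hypothetical handle and generation):
+//
+//	obj.If(Conditions{GenerationMatch: gen}).Update(ctx, uattrs)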
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
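+		// The integer value is appended in base 10 immediately after the
+		// "name=" prefix written above.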
+ buf = strconv.AppendInt(buf, n, 10) + } + + if gen >= 0 { + appendParam("generation=", gen) + } + if conds == nil { + return string(buf) + } + switch { + case conds.GenerationMatch != 0: + appendParam("ifGenerationMatch=", conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) + case conds.DoesNotExist: + appendParam("ifGenerationMatch=", 0) + } + switch { + case conds.MetagenerationMatch != 0: + appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) + } + return string(buf) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. 
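+// This is the Google-managed account that Cloud Storage itself acts as when,
+// for example, publishing Pub/Sub notifications or using Cloud KMS keys on a
+// bucket's behalf.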
+func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { + r := c.raw.Projects.ServiceAccount.Get(projectID) + res, err := r.Context(ctx).Do() + if err != nil { + return "", err + } + return res.EmailAddress, nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.replay b/vendor/cloud.google.com/go/storage/storage.replay new file mode 100644 index 000000000..07ed1bfaf --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.replay @@ -0,0 +1,30067 @@ +{ + "Initial": "IjIwMTktMDUtMDJUMjI6MjM6NTMuNDAzNDMyMDEzWiI=", + "Version": "0.2", + "Converter": { + "ClearHeaders": [ + "^X-Goog-.*Encryption-Key$" + ], + "RemoveRequestHeaders": [ + "^Authorization$", + "^Proxy-Authorization$", + "^Connection$", + "^Content-Type$", + "^Date$", + "^Host$", + "^Transfer-Encoding$", + "^Via$", + "^X-Forwarded-.*$", + "^X-Cloud-Trace-Context$", + "^X-Goog-Api-Client$", + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "RemoveResponseHeaders": [ + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "ClearParams": null, + "RemoveParams": null + }, + "Entries": [ + { + "ID": "f5f231bed6e14b7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:54 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZvgYBWgsCwPaGI9bo1ccC0WCBc8kJgydTwioDtXR9xps4HiDoKXI-vjYUl876SMqF0JhmhaEBgvxrIL9Y989YCFrH65xGys_r1JbPdi9M9N0kS3M" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "9a9914424ef59619", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFvRYrCleVqpn0QshSvzW5I1-8o7N6vGYh8o5G1f-AHnsX2N_x-NKJrvlxnXqm9auw5gMoWFaJTSTtKL5y85WlQ_eAjmmlrkD4tbHYBZJ386xgaZw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "17f2abbdd781a33b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYoTmTG5mxpFGPvmECUTlGMlQwhfmqGsZtBtZ9xV89Pw3q-p5BBeX_3imdofr_7EBT7nBm4v5alpg45Zi8a8ET28qBH2xfNe4n15HR-1fhGou2wQU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "6752f5a9a036af11", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4bFsk4ylv96GsTkuDKG--hVaCR_UEhZ_fAzMGt5Eu5ZKncHOLjU_f2PcNP9saFGW-UkH9jXwt_nuR0G2zXOBjMJmLdd7Ml61bGMMrJeVa0OtcGpM" + ] + }, + "Body": "" + } + }, + { + "ID": "9c25646df7aacad9", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq6CjN9PjjzT3LmHxW_tU_ciQ1rahetoQGbX_gX8EC5il7tPJi2yxi5VZxnDNrp1h14b7Ix8tnvtkHufAyO1-lMRutdHK5GSzonff78Nm6KPAKN5fU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "f795b9adcb1b546e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2-mWQyRDbFSpF6U96vQpaBYr74NgiUWh3-KZnLWaFYnhQti1tgKWNtL15YgK8blaRSnzGeACPA6jNuM34yhr7bxztrdN2tobEQAzD5RVgzpqx14w" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "2ee3f84c4e4045fb", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UplJEr-Hxa3hDFT5ozLEHYhHfaxlYFpc9Vwm8AL831-w_7BBgxHjjEsU8Br_uLnLes0h9hz37iuE9V8uVZ2liHY7ZD4piNH31oyapjCtwyXrukIP94" + ] + }, + "Body": "" + } + }, + { + "ID": "16f19dbf8e206756", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqvp5XjvB8nhNuz-bTeN9OklTfiBGldYKkcY13JF6oUfpV0z_jwoEQD3B3Ss3wWpaSmZfePjo7fkkr-hP3jbrUazNHaqQliiHqOBSNmSoPmwJpzfOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "949f5ce411d6f672", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + 
"3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpVeVcmmuUyOt3Hbja89_Ewi6GRsJtRduqK93OT4Ys1aK5GqDWeGxyDbczUyRLeUYvZgtJzYLwVOOUszqAF4ipSXgZ1L_byd9cJ7ttVfQ_ceQBXxY4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJva
mVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b75303fbdafb66d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "64" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDnNlC3m95GplHjE79aqzhtwgfJCWQjHaCFG4i7qmTFliz2gdE4OiOnKAPIoNqxEngE35065YXNYA65aMSSeEluDKmQ__rJXcS_DpRdoYP4rZIyPo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OC40MzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxhYmVscyI6eyJlbXB0eSI6IiIsImwxIjoidjEifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "831805b62d969707", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiYWJzZW50IjpudWxsLCJlbXB0eSI6bnVsbCwibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2495" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8yNb3V9Kq8zPa_pdVrJIYv83v4fu6xAHwktfTz_Cy4K1rpi8xrDzYmw5wICaazfMcAiYPhM8r4Y6WkeOyeCRInvkJ6ndduhNN_dgu1U59uI5F3Qc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS4wMzJaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InBy
b2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "8a816d061fe9e7e0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "77" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbjywEmDPkqxln7-Nx_8ngRxoWvncfCx1fGVpPZGEjjmg8OJgv0uaczxapjlNeEcvMnqWI_RVzG6_588QaO8nCnPVnE2kkIek3D_t6UNL9CCPYLRc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS41MzBaIiwibWV0YWdlbmVyYXRpb24iOiI0IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FRPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVE9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBUT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVE9In0=" + } + }, + { + "ID": "6a60397ebae323e7", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiYnVja2V0UG9saWN5T25seSJ9Cg==", + "dGVzdA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UokOvJnDKKHAQOa8mJLrxFpWWvQ2U_BC_3uI0Z4x870Q068evHio_t_YudbSq614h77-ofhBsyHpoknWnm_YrnXxkHzopreKoMBykFIcbsSB8TDKEE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRQb2xpY3lPbmx5LzE1NTY4MzU4NDAwNTUxNjEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5IiwibmFtZSI6ImJ1Y2tldFBvbGljeU9ubHkiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMC4wNTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDAuMDU0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAwLjA1NFoiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJDWTlyelVZaDAzUEszazZESmllMDlnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seT9nZW5lcmF0aW9uPTE1NTY4MzU4NDAwNTUxNjEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b
3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJocUJ5d0E9PSIsImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0=" + } + }, + { + "ID": "32d392d1da32f27b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl/user-test%40example.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "111" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], 
+ "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoau0xgFp6ib5wM0bBWjRlklvDRPOu0VZ-LFCeENUWXutmkXSfgUbtr2Nuefb7Pm_yLvCNqtB9B6k_N1V7AlvkN4_JEz67ZSXQ_sAD5L1teQIpGiqA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InVzZXItdGVzdEBleGFtcGxlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9" + } + }, + { + "ID": "6e3d0eda38a3ab6e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "59" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6dHJ1ZX19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "663" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpSPFJHLzusxOr_OvhStJlWEpOs2EuthMO0Ys6pS9bsQeP0fthp_VUZfa8_sN8TX6PJYpxIdFlxB2QUaIujot1cUrssoU74XFrAwoqhlmiE5y9Aw-w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMS4yNDJaIiwibWV0YWdlbmVyYXRpb24iOiI1IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOnRydWUsImxvY2tlZFRpbWUiOiIyMDE5LTA3LTMxVDIyOjI0OjAxLjIzMFoifX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FVPSJ9" + } + }, + { + "ID": "8f8dbb687dc49edb", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGpW2zLlN7nAgV5IVkKU3kx4QWHCkzAgMa-QPC1PyCol8CP9W605bMUmFMeerbR4enzmeNMvtb4a2HzBPUZ296YfGdtkt_6Guq82E226xzC5TPq4w" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"invalid","message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.INVALID_VALUE, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, errorProtoCode=INVALID_VALUE, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., unnamedArguments=[]}, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., reason=invalid, rpcCode=400} Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only."}}" + } + }, + { + "ID": "42ce6421b2c9fae4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13358" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJwxja3nYzWYbg_I5gWvOiow2ORuo8tNA-_Vzw7DX_YVhhb6_p1giUk3WjUHWt-lyDA13adhPGi4BDfIXzQyf-ZL3lsHoa2sNm29BIqRznw4mkAdE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., unnamedArguments=[]}, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., reason=forbidden, rpcCode=403} another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly."}}" + } + }, + { + "ID": "0ca0bddf513110f4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "45" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnt9fX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "624" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpdDWPDU9-gPzHEieE0Rqx_40yf8fJLhwAP6fVsdS4F7I7sWj0h-Ti7VoDWciZgI_lgNUB7qyh08wjTAxrTLTsSiIYt2GR6ksxhDRupMoPWni5kXmE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FZPSJ9" + } + }, + { + "ID": "8a8bee740102593d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2964" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq59Mf8Ea4fgpDnUzupeIP3bGt3VpyI6HjL4KJtDKAD_h-Ua-AJSX3u3x4TCsx2MZcIVhMs9pW9SWsrIcsvsr3kGt2Je9W87LElbN5dlw02EItBcR4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LT
IwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJidWNrZXRQb2xpY3lPbmx5IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDAwNTUxNjEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwiZXRhZyI6IkNQbXU0Ym54L2VFQ0VBST0ifV19" + } + }, + { + "ID": "91ba2c22c5f5de9a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrwotKay181GHzZsWfp6BzJA4FDOIfK2s1WlzB9p8QsIEX42AtvMhkgLWqSIyEhb-MSv9snqx0WRwUs5sDN3_5NVoFTzQeLkDBR9mbsixI-udc8SLI" + ] + }, + "Body": "" + } + }, + { + "ID": "43fec6c3a8c8cb63", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY29uZGRlbCJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3161" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Etag": [ + "CNHLq7vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrYnzZRz4GFV3BsHoF-vv9v2Lc13Wp6-P5iowSZFjqyykVhYZ6CkesIVxA2v2xNCbVeWgCBXCFFt9fje73cis1cECwwqrYLr5QF5bZCy4TsJ-LT8k" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb25kZGVsLzE1NTY4MzU4NDMzNjg0MDEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsIiwibmFtZSI6ImNvbmRkZWwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMy4zNjhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDMuMzY4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAzLjM2OFoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbD9nZW5lcmF0aW9uPTE1NTY4MzU4NDMzNjg0MDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbmRkZWwvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbmRkZWwvMTU1NjgzNTg0MzM2ODQwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMD
EzLTAwMDEvby9jb25kZGVsL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0=" + } + }, + { + "ID": "ba03d9efb1402903", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368400\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12249" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-mcvaL-eBgBoviwBg_r8LyEEK3JNyFaj0cFy2neOMo1pVkWpkGQ-3gz8vQNtAGoX-Q7_CMYLNv_I0pOoy5iRr-MdYKny5ICdC1s3ji5DwL7sqmn0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/conddel: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel"}}" + } + }, + { + "ID": "e26af2cd4673cc7c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationMatch=2\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12051" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uopfd5TJBzhGauzgyZW0h-TNFtDHz34k0kjbeuTeQPnMGBaiSFz9FdWA2gxp7qKp-V586voh7kqHgnVSW_QI4bWEuC35pj8NbCmmpNL_9pstpCgckw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "857f8a30eb55b023", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationNotMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 304, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uprv3QMjU4zz49ScYI4YaY9FitNwO7wGMQ1KZ8Y99w4D7keznFfA8M80bMdKvSH55jsWsDoLKcQ1VvIqIEUw88NqSfUM7E5SR_y5zfDaCf_uHSlgPM" + ] + }, + "Body": "" + } 
+ }, + { + "ID": "4c69a7dc19302935", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368401\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoiZa1zqMV-8ZkeUrvT6Xi0uJul6yGWWth5s3YbFtA2p0-6vc_54sNtEbxbnsASZyMDXLelHaCSixgcVT6JDVELrXHDim9gHcjjlSTF6BsHWu181A" + ] + }, + "Body": "" + } + }, + { + "ID": "834f05b1eb2dbc32", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMSJ9Cg==", + "TuDshcL7vdCAXh8L42NvEQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozIFQVqWdhJxDLMyV7dfkeraSOw2Mw_AsI_aCWnAUudrz4HjQgZ4kbnvKXJfNF3SMQhxzxZHhk1Otmw7PgPxL7HyDkPDyK1DeHDc8GyfY1B3Q3YfA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "34a1730e8bbe1d09", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMiJ9Cg==", + "55GZ37DvGFQS3PnkEKv3Jg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVrbP1vUH6TU_zfm_Aaca624z-91MoULnLIBcn9Hg4htv5T5Z6B4XYJMFZUuDjWWeBhR6EpG1UZL4iJe0TJybYQ2CCsB_k63CcOex39xaOIT8UYUA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "b7c2c8ec1e65fb6d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9Cg==", + "kT7fkMXrRdrhn2P2+EeT5g==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrEgFAbU2gAqyKMITI9vr9kAgIBfNY3ZGs1l2_X4e-fVtNb01M9N81N-83LdChVzrk8iB09yuKUKdxRc0nMYrgy7S1fqPwbJdsQ6TJfMB05JJj6qr4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "0dbae3d4b454f434", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk7CzWGAqaT12X_fTymveyPtsPJIPHD814QaAOIQLA_AJo_4OtnQOOXJM2jJhIZ1Co4NgEmboDUG9su4SN5fDTHObNh9elLts9VpUJ9iSYrIsn-Os" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "3793882b91d38a07", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpzUH_BRjU20OGbDAW2dy0lx9Gk_1Ko7zks6KG6OEpq0pBPfCIBjkCeIZTxKxtvxnOd1hqlZF7SlbNoyNUqX5UFXpZY5NjP5GTeUkm512vaBR5vnDI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "39924c2826bcd33f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgOr-dpWoY-JJi6vRigJX3Mpi_WQ4zWvWKK382DCP-mzWSnrpbaOzijhTCdNz6pmXskVVs30APwlqZgvb-S6xAwXqKJG5n6832Py4UlTdDR_INTew" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "89ea9ab5e21a635e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqj9dj5PbUhIU3baBv98vMViDP-BnVPs0APrFYL4jSbKc7fK6eAGoyDfsccGzxK-t0lGvozizW4ltrga8DXo_oxlZ9-k5a85v9i0PWTFIisPC7xuL8" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b27c106562362f6b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIo3JJa2rfg9_o3bXN_5QU6XU_iIrWYfuEuvKTQL0O5tjIHkODIDd4biyHxjbGNFnrQNbkiEUXWEBlo-3fTVgfFTOWuaPpgae-SafTBP8h5X5ddJ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImV
udGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "ca654b46531c4cb2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqINI0Ii418NlxoYErOrjhTKb3-G3n-8h1Ryc_4YT4Tksc7WXA9m4CcJWInyhJahYC-UUrM47O5EK-3FUt3toZOZxWlbwhE6Sfnra0H-CqjmvxWyRs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "0186889a60e651db", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFTK3QnFf4m9htpY4s3P9_NHuDiQl8InpJoWPOaQHo0XaYnSoSqkH7CcUbm0-sWamsCZbd4DHoXHLCrea3ff9rLpWlptkL0aMKV_Dleb1Xm-j14fQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "a253776f79c46fe1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoadifz-4B7kC7DvXOyrNa5omK5-DXUNJBtT_d5I1jciPUvGlRm1L1vkvVMU1Y5n9zFig2CF_qqBLlcdvoYCaUeaNUu706L0fKJhJkA8vV5JUvFq0E" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYm
plY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "dc8959751769ee9c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLeV9n2aiAq15pl3CqEWOt3_OYHsxbG0lHFIKr7Emh5btUEXLXl4nZWPi27LinKW7i0LYvp-HZK5b9E742MR_PNFe87treTpz_QgUmchGPKLcdZVM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "23f7f167269db6b2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4hY4jSbETgB5jf-AU8_b1sRvfTSrlt_Pt6UXiuTte4GtueVBgDwuMEliwE2-nOSiRE6juXOCbR1LQlRrpek1TLeRRf1cCVbz90YcCIGhWV_zFz0o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "518c5855f7f2035b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGaiKkmr1oBduBvG8APGBVoM_Ve7zHw4TBuyV3QYcFr9SYzEnATEwE5P6BH-yBsVXSmaRLej1a55x18Bra_ZAC7nYh2UKvWM66JGE3U1muaDBabyw" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "5887398d8cd8c906", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGAvr7ojM3PcmnrTMTX-TXgYKzfo-3gzwYU_l9OaCfpth_ad2lWUJf6w6B8pjEGsAFZJvNueCHYGSJdx6YJ7NAe71oah1xi6lGQvQkWEwAl0fXd7Y" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "81d53e304c7ed90d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur9XEMeCefUS1K7QkIxrxZboQ1zQ5SkrEUZMzrQdNBNAuDQnpDczXzJvF-rZmxFVYSdMySScTTu-FICOdI91b-fQVRomrqAxJwgn60FoObIVVpgkog" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "0be1bdae5ba86bfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UocXesX_BUCirPixQFUhYfAIoR0Os309HMGHarzTGrqH1PkmHuGaaiNSH_pJq8nnWUXEnjk1cvuPGJWCF21t7EJQtCIGBWQoPBNV6OpvALZGdpPXuI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "592a3fdf8b5a83e5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoS2FM55Dkg1PRaNQBzsu5KBns-svmRrQ5byrUGRHgnsBQQDdE3ZznljVTrNq3wDAClddQ4zbCMQSLdqxPPNlom-n1tMg1hO8s6kS8_CTWy2SOM6As" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b01846866b585241", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo22tWMx8uW6afXbfFrFT8UZzPbsqPItYMdRAPHNEuksgeqWI2US_xblUG6C8WgcHvfEeR_19eVmBTmW6QE7rmosaIm_raZxW9FuCHHiSi8TJnZ1CI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gs
erviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1
","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "2eeefee032c6e805", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + 
"Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq55nB00t16AHTo1gSxBFUNplz5SJcGYBOeCPsK-MD5OLO_kHKT3YNZq69AAHpozaWVrTU_jDQqiqMFSoPhhmlF2dR1mcq6Ihk_y8uuFY6FM4O_hmI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@d
eklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gservicea
ccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "11bfe17bc978cdfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotgN85lDspseXEgwnXhmfSmio9oOxSzhqtyabApVH0KQz_aVg3DbQ7m0Z0L9SzPzzmNEUfU6xhAT5xQAXf-wvY1NHDFxdf9IJ6YoVNhN0aHN-75hY" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "c05bfa6f1a43381b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + 
], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVVjXVa2SMY6cjYeIgdcZOivQwE2Crz6UPZs2VXVBFMwReZs7kKbETuTMwuEMHKKm_LIrk-o6jS07MimubSWVxGMzT1BtCacU1X2Go_RckmyJPZso" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "f2932604385db16e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrADckAOw2vMHdy0qy02f20x-8s_Ue81-NYwSpKeqp6Zx6eCPReLM6qXUyxFz0xHNGob5WJA8J4ctl6ZFzL2fEVwBMNY8xxzpkKOAwSu9n9e0OE63E" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "8581afc6216ad2cb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + 
"success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrkoYpGTn57WC5t-sItsEdWimp47AtSuqw8autFOD3TirChrdQThg_Fh-kykfgvnTaOyiw1InKeYs0Z2MISmjUpu4uHUUGDKTX6W0zQEuUks7i2BqQ" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "ae129e597d5dd3e8", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoaoET13zBO6-kmfarxy5tCZhmqc2oO38xOd6m6OQ55e-MyyKYM35hNakm5xYdWaoUNsKwVCiklp4HtYE8afVObxcDERuTCgyTw-olewCN6VBwd-H0" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "11acf26dc7680b3e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up_XtsS2bu4fnGI-Z85gqz1MECYqweRqin42arAyEfpJpZwmadI1-Op9V7n-q3UaYyRtUWFR7an7zKxWhJ_CB6PXz-C55C1nPoI7EWVEV2oXcS-q8c" + ] + }, + 
"Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "ff72204b0c538268", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrjLdnTX47-_XGNBlBZ0rEgsIkYJMzixBaVmhjn8IsK66UpQAUO-njMM-WznI-DBNIbXVTcPQKZBNe2ZHZEskfuRjsFwMjtooyUH_PqTfZnARVv8M" + ] + }, + "Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "7972502f866e015e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-15" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNiJIZYFETpngMkDItRU7PtN7Fv4bDU7Kfvndst1JR2eGmW-tOMTfng8Abx6EWwPvvodLkSk-1TM4Gywxzh3c24gPDN4NsQMuh40Mfp0Jvg1syOls" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "251a62254c59cbd1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + 
"bytes=0-7" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 0-7/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UobLFpkqodTM1bHuzS0GzRCKZypPQkf3nH1hbOexCjgjAcheZhV_zxJRyLhOYZoW8ABEaVMJLULNrzAm1VZs4JrTdtT46fYIcQe7OBeJp0aHI7Lr5s" + ] + }, + "Body": "TuDshcL7vdA=" + } + }, + { + "ID": "2482617229166e50", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-23" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uon8ZtKgOTqFm36bWHd3hf535jPGQBEX9j2yPrr11_ycAJjfI4A-mFWMTsFRR3nwSo68784B9TUPUjQd0cib0QIrfBjfDcz91p6oPJG3VpV9afywIY" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "74ff0c5848c7dba6", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + 
"Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKc-08ZJEWVbyaV84xKlnrHkl2PBuBUQP6xtx6TzZW0fsjyZ-SHshiy-QMGCuP1UF4x1ettHyDvRbAleHIXy4y7wMwoUWST2NKs_0oRA0oVqOoKtQ" + ] + }, + "Body": "" + } + }, + { + "ID": "90888164f5d2d40d", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTSlg3r_LSoiqQyJVJDzL-0xNj6jlSNp9zLUmlPtu3xbhCLBDNAxY9CRbyEjw7UudDcsFOe43C3QlsVjoBJln1q179ZspYWXY-fHqjrztAfJkZUpU" + ] + }, + "Body": "" + } + }, + { + "ID": "7961f3dc74fa0663", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], 
+ "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJjC8HFLb8u_EaJVF0_TUOhNWhbooW0oANE8_LPjoIdLcoU42caZe7LjgbTIjCb0Ss3horP2noxirF3u3FGq0Yoh4rjuHBVhwZcemZHnKLgJUUbQQ" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "af8c1fcfa456592a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-31" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqq6ro5-i6q8pIDup0yW35uFEynuLK95P8OSFhj7b70DK33tjv3fkOdkobsfwplOAGuNME2bLjQuyy8mhd2xBbNjyjvXt13Nc48PB4M2t_46RoM3Ak" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d312c241da5bd348", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=32-41" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 416, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "167" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 
01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAvzLs-GgdTPEGM2kvABFTxehSb-DhZQ2Y31nQ9ad0RMmfIMtVGRBXvkXo7FwGAy78ZHMSGWRKG-DYMy6JU9PCM4Tpo8PDmm4-rHa_LVpH2dFN-JM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+SW52YWxpZFJhbmdlPC9Db2RlPjxNZXNzYWdlPlRoZSByZXF1ZXN0ZWQgcmFuZ2UgY2Fubm90IGJlIHNhdGlzZmllZC48L01lc3NhZ2U+PERldGFpbHM+Ynl0ZXM9MzItNDE8L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "2bbd010d0173c966", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZDScoUU4zzBoXc21jt6C1vcW1SrU6ELiWodrCID-OoNKVMbecv_DSgHZATQGs4GS-BebkzqdalqTq5C77aKXQMxiks-9A5kyrc8N4aY9L5YndaI8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "e3522e1cc7aef940", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5x_r1PKMQrnKbsfUWRR6wBwFQTM8U9mGwyq_fK56rrHE_UoLDxLRpiuaFGLVi9L4Hj67UXH76drA6KQQQgOsMVo0YEl1pVS2P7OKQ64WF4NubRCU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FZPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBWT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBWT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVk9In0=" + } + }, + { + "ID": "77ed46692038573c", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpBnZgUt8GyAer5VG-_nEuf3YizRjF2t4MS6vOflI_Vid1-zVpQ99TuphB3pZjPZNI5ZoHJxCmDv3lyLwyhUDvk_p_NO9x8qZTEpjf1EOO5uxRsOxo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDE0NDc5MiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC4xNDRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuMTQ0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0LjE0NFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQxNDQ3OTImYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF
0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvcHktb2JqMS8xNTU2ODM1ODU0MTQ0NzkyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In19" + } + }, + { + "ID": "9187dbb998c64d40", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb250ZW50RW5jb2RpbmciOiJpZGVudGl0eSJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3299" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpyrF7U9KSc3XcJb-T_KGf3Fg2yhFUjJqTl06wpqqhG5IT6chgLCWKnLTuamfHtVq8XcP7acZy4TGyKIEvZ4L36TGTfSM8Zp_JyF8K4rN5p375VBMc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDc0NzU0MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC43NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuNzQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0Ljc0N1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQ3NDc1NDMmYWx0PW1lZGlhIiwiY29udGVudEVuY29kaW5nIjoiaWRlbnRpdHkiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQ3NDc1NDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yY
WdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkNUNmRUQT09IiwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "c199412d75fadc45", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "193" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOlt7ImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9XSwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm1ldGFkYXRhIjp7ImtleSI6InZhbHVlIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2046" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKO5A_yihnTzTSNbXxyLm-I3MkyC58APsD1U6RZ5_xpjjjL3nXVZIyACkeiDKXRZyU_oP1Yt0FeX23ONp6JeNyoy0J0kW8GwGMgPeN1ATdWGzCMXs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS4yMjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJtZXRhZGF0YSI6eyJrZXkiOiJ2YWx1ZSJ9LCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiIsImRvbWFpbiI6Imdvb2dsZS5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9" + } + }, + { + "ID": "c845025158c6b7d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "120" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOm51bGwsImNvbnRlbnRUeXBlIjpudWxsLCJtZXRhZGF0YSI6bnVsbH0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, 
no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1970" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryOVdmGzCqBOS2ZZ0nw1SqbHF7qJ1CeTxgb6a1BJyoA42NTNZ8RdEVQGNF5u2hWdSQW79cBOonxjms_MoogyDJVAtKHl1rys87QHF6-750NNNSoEw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuNTI3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0=" + } + }, + { + "ID": "811b0b9d4980b14f", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY2hlY2tzdW0tb2JqZWN0In0K", + "aGVsbG93b3JsZA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CIGhrMHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJTh_P91pby-vQkRF12GexDihy7TZAlUPamDV_REldvXeqvbrWY_kB0Vcp1lYyuIY7EK2_eMBYYSRMMIVEYz5fxxt51-9Br_UmYvnuRgjhfC16AuA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jaGVja3N1bS1vYmplY3QvMTU1NjgzNTg1NTk2MjI0MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdCIsIm5hbWUiOiJjaGVja3N1bS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE1Ljk2MVoiLCJzaXplIjoiMTAiLCJtZDVIYXNoIjoiL0Y0RGpUaWxjRElJVkVIbi9uQVFzQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdD9nZW5lcmF0aW9uPTE1NTY4MzU4NTU5NjIyNDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY2hlY2tzdW0tb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWlud
GVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NoZWNrc3VtLW9iamVjdC8xNTU2ODM1ODU1OTYyMjQxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jaGVja3N1bS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJWc3UwZ0E9PSIsImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0=" + } + }, + { + "ID": "69c77b8bdd7cc643", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVyby1vYmplY3QifQo=", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3240" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CLirz8Hx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2iXVwACD0yFKYjt0WT-lW1Tx6PtpOgDYttPBWBnJZ3CPf4cUK7heI_9SwdzYXoieaRHDj9n3w3M_SExedwQqQ-GTOY9qM9DPmrPy11hEny0WjECc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QiLCJuYW1lIjoiemVyby1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNi41MzZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTYuNTM2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE2LjUzNloiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3Q/Z2VuZXJhdGlvbj0xNTU2ODM1ODU2NTM3MDE2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvLW9iamVjdC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsi
a2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQUFBQUFBPT0iLCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9" + } + }, + { + "ID": "1017883279ca634a", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "98" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "417" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKqYaaFUj6b4ezS4lnnQgYv4CHVSsqhVSlLP3QqpItNahh25KnLzbTccK_idrfjKV0gExzr17MvFmoRw7oUIWyJJINmpEXEw5EmHScxipsb3KbWZ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvYWxsVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0xuUytidngvZUVDRUFRPSJ9" + } + }, + { + "ID": "35f04bf2400be96f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "4" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqC9teasrZdeHJo8KLaQQ251d02ORMxXreH3TKcaQp4NWLVbpiAw1GpW9WeSdFgbpWtVDdyzfjE93gAs6uA0kdlDPZsIEfJVK3GFfaZE2aW5aFCn8Y" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d01040504d2a91c4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoib2JqMSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30343" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRhLZOcm0K7eNByUzCBgYfgiytuZ6Hj06jPkbkK77LS80OPgO_78AL530-2AcrlLe1fIy0jM0tonLLncLuOszrqKcGgVhqEJuE48-67vqBRACjqmY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1."}}" + } + }, + { + "ID": "db327c92dac99584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoa0LrEhOIeq-iYERbwsOkXjr0QR1ANe3tDsleUqoFNU7fRIruaqYikdnU1svzhc8iGRwx0A5dAGYQ_mM045LN_oxU1c76ugXtnWS2PW8wBcmst7hk" + ] + }, + "Body": "" + } + }, + { + "ID": "e70c028bd3dc9250", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12275" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrvFq18HewCbAkJcD7BmKgE8_WOFqSNkcwbpbNqCDPIy5jLVf2fIaHdg76_1rHuAHTtFM84Zr6euptOehRZOqA38Zc_BrJUSZKjg686imvazehpBc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused 
by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "4d7120b41e583669", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoJ1cGOQjoNiRk8qv-igyccVp3Vg_ut7NLSEO3A-yyQwgGnoXR5jPbi3tuomYWrS57GIvd6GpShkcAYSIJ2e94Jx_1SseYkYtlSF8a7qsDU0OVFYgM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "e7711da9b067a58f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "156" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6Im9iajEifSx7Im5hbWUiOiJvYmoyIn0seyJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "750" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CI2048Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoJ5Us6Rp03fUUqSAPFFGPuqW2qR4gCfzJ4w3W1Kx8C0JGfNDxte0QlBoVEi4K7rV5zkUY1IXQvIk6FOU3DxP8ECZdxkVJBW57Jf5iXo8kRyLVu7OA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDEvMTU1NjgzNTg1ODk2Mjk1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMSIsIm5hbWUiOiJjb21wb3NlZDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1ODk2Mjk1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOC45NjJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTguOTYyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE4Ljk2MloiLCJzaXplIjoiNDgiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29tcG9zZWQxP2dlbmVyYXRpb249MTU1NjgzNTg1ODk2Mjk1NyZhbHQ9bWVkaWEiLCJjcmMzMmMiOiJBYldCeVE9PSIsImNvbXBvbmVudENvdW50IjozLCJldGFnIjoiQ0kyMDQ4THgvZUVDRUFFPSJ9" + } + }, + { + "ID": "de5d78de4310ff60", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CI2048Lx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:18 GMT" + ], + "X-Goog-Generation": [ + "1556835858962957" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqprnxhGEtAMeMcwiKn43QTEdsHrB7k_jCjOqnvH984bk5CIjIhSalrDVNwlLb37IU67jIYyIY1Nq8uaSz5HGUROWwpE6SVmGvIGXnuAbTP7x8Ez5w" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "4b7c14a3f35089ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "182" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvanNvbiJ9LCJzb3VyY2VPYmplY3RzIjpbeyJuYW1lIjoib2JqMSJ9LHsibmFtZSI6Im9iajIifSx7Im5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIn1dfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "776" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CP+RiMPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrMYc2wH1kAYUthDQQhZ4gGQU6kYXq6KGs1msu9ifujWYQ2dwS5ifqZFWiTYf74Rq2y9VkzALfhjkTK6anDL-SwHxUH3IwMe8j1rrrbaMbv7eLQCR8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDIvMTU1NjgzNTg1OTU2NDc5OSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMiIsIm5hbWUiOiJjb21wb3NlZDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1OTU2NDc5OSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9qc29uIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE5LjU2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOS41NjRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTkuNTY0WiIsInNpemUiOiI0OCIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb21wb3NlZDI/Z2VuZXJhdGlvbj0xNTU2ODM1ODU5NTY0Nzk5JmFsdD1tZWRpYSIsImNyYzMyYyI6IkFiV0J5UT09IiwiY29tcG9uZW50Q291bnQiOjMsImV0YWciOiJDUCtSaU1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "26a4cb091ed2f1bd", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "text/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CP+RiMPx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:19 GMT" + ], + "X-Goog-Generation": [ + "1556835859564799" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-t-yqapXjBzjIrFWFhaT080f06JahbskCp0DC_-izUjPK4fYn556m2qyRDgj9HjkkDPtKR6dauzF5afjNZZ61bN_dTvw15d82A206-SdS1Jv_YM4" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "6b59ed7d3d098970", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwibmFtZSI6Imd6aXAtdGVzdCJ9Cg==", + "H4sIAAAAAAAA/2IgEgACAAD//7E97OkoAAAA" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3227" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Etag": [ + "CNuJssPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoXNmswaWh2Gfcz1pYPy7etwk09Nz_dmuYVq55M2p3ox38A3mC0QgoHf-hL1uCZxQvusJSIHeugTQvmCgZvRdXV-3juD0W8KI669m0EILAHSfnVoJ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nemlwLXRlc3QvMTU1NjgzNTg2MDI1MTg2NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdCIsIm5hbWUiOiJnemlwLXRlc3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24veC1nemlwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIwLjI1MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMC4yNTFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjAuMjUxWiIsInNpemUiOiIyNyIsIm1kNUhhc2giOiJPdEN3K2FSUklScUtHRkFFT2F4K3F3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0P2dlbmVyYXRpb249MTU1NjgzNTg2MDI1MTg2NyZhbHQ9bWVkaWEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2d6aXAtdGVzdC8xNTU2ODM1ODYwMjUxODY3L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nemlwLXRlc3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYX
Bpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiI5RGh3QkE9PSIsImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0=" + } + }, + { + "ID": "070a9af78f7967bf", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/gzip-test", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "none" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Type": [ + "application/x-gzip" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:20 GMT" + ], + "X-Goog-Generation": [ + "1556835860251867" + ], + "X-Goog-Hash": [ + "crc32c=9DhwBA==", + "md5=OtCw+aRRIRqKGFAEOax+qw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "27" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosPALnXk-3j5nBlDDWX7hBP4xBNcCe7lGNkEU-sHHTWsRexFWLoB-1gn8RfnlqS6Q5ZTrR86NBxpP1T0bFxgHnlYUYGh-G3mThRCgGIAWjXggOeOU" + ] + }, + "Body": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + } + }, + { + "ID": "3ece6665583d65fb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj-not-exists", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "225" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uph0zmuYk7ZmOWzbNkj24Iai85wz2vKj0mMnMkIHIoDUlDAUCA0e_CgFT5fV_XAvgUx06I9FspomkB_ImflSVCvj55GK6zEoY5iV1hN96nh4AmPBN0" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+Tm9TdWNoS2V5PC9Db2RlPjxNZXNzYWdlPlRoZSBzcGVjaWZpZWQga2V5IGRvZXMgbm90IGV4aXN0LjwvTWVzc2FnZT48RGV0YWlscz5ObyBzdWNoIG9iamVjdDogZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai1ub3QtZXhpc3RzPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "602f4fdfff4e490c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic2lnbmVkVVJMIn0K", + "VGhpcyBpcyBhIHRlc3Qgb2YgU2lnbmVkVVJMLgo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:21 GMT" + ], + "Etag": [ + "CNat6MPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWSi2VcVAHJJzhGSPjVT17kZYaCh3uZvWXBU0R4hVmPZJ10gvbv_Ucrfd2kwrBvRYoZaQcYkY5Q3M-oywiCNtLm6SM9InWHz2Omc-0pWJJjfQWPCM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zaWduZWRVUkwvMTU1NjgzNTg2MTE0MTIwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTCIsIm5hbWUiOiJzaWduZWRVUkwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMS4xNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjEuMTQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIxLjE0MFoiLCJzaXplIjoiMjkiLCJtZDVIYXNoIjoiSnl4dmd3bTluMk1zckdUTVBiTWVZQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTD9nZW5lcmF0aW9uPTE1NTY4MzU4NjExNDEyMDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc2lnbmVkVVJML2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3NpZ25lZFVSTC8xNTU2ODM1ODYxMTQxMjA2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zaWduZWRVUkwvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2
xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJaVHFBTHc9PSIsImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "4a1c9dc802f4427c", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbFZENfdiDkKpD_H_zm44x7h5bTZ3VD6MEoiiXuMwxHHK-zCSSoPRA2ALNUXoR3EcS1c-huuyZBIiw7ULKii7BNqKz0RMu4lWRxrDRQBOjGwKDDgE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQWM9In0=" + } + }, + { + "ID": "e12cb7360f4fd8dd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVJI9g41JQEDEgaIAOXKSCe2B8xklFKSoIciM7uC0uod7t7NzXfxsIrI6mQeZGkWa2B12xdKpSJBndrNbBvygZ6aXxpBBvL8d80NJ42u8KQC7AaFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWM9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBYz0ifV19" + } + }, + { + "ID": "d25b1dc2d7247768", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMSJ9Cg==", + "/pXmp0sD+azbBKjod9MhwQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosBTMUgCzCYlGDKqRCN7Kexl832zyi4FxoRqfYtmTr8yv63z4BAQBeCTLajyZqMbRyzjje7TtPPYomUSv_8zrQ8bFdXlNzbTSeKK8kXi2Ys82jus4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxIiwibmFtZSI6ImFjbDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjI0N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC4yNDdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuMjQ3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiIwRTl0Rk5wWmowL1dLT0o2ZlY5cGF3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMT9nZW5lcmF0aW9uPTE1NTY4MzU4NjQyNDgxNzAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkZpRG1WZz09IiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "1d3df0d90130f5fe", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMiJ9Cg==", + "HSHw5Wsm5u7iJL/jjKkPVQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "CJrxw8Xx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMl0_NB41LujALQWmul06CCiuw7okuKlhYG_qilsoXlld2qoYgc2-ABYogriA37QOJMK5ZlJ5sYzP2Mp7CTSzK9uccFLi10TdrGFMUjUpD0OvcXrE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyIiwibmFtZSI6ImFjbDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjczN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC43MzdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuNzM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJjOStPL3JnMjRIVEZCYytldFdqZWZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMj9nZW5lcmF0aW9uPTE1NTY4MzU4NjQ3Mzc5NDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0NzM3OTQ2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0pyeHc4WHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDIvMTU1NjgzNTg2NDczNzk0Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkF0TlJ0QT09IiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a270daf4f72d9d3d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2767" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UranlihhkPcnPzTT2UYs4r6Hritk60lULu-pzO6YX-EWIlMlFiXzKmxaZVgOcPfnCgfJsRktKsL_TbFun1PfupraBREfYklCvRXKvJ_526gW-Jf9zs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZS
I6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "328125b2bc991e95", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosP78wuW4e1mPLJgcCbCxBC4Je5LWjKugd0fVK51u-P3qeXbQXj4_Amo25ZHyut2LZvoipvmK95xEvebSyVyzVydKyU2VSbHLDkwnRpR-nr9UGIf0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c7595376917851f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:26 GMT" + ], + "Etag": [ + "CAg=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqnZXiLrdCFoctnkKUqdvXLdSp9xumFk20gsRJc4xB1CTZ14aLZYwbDJmi90E28Q2_E4klaJBOaB1OCcpbDBoF1N9E9Bk3VEaNYYrHKMoECuO7RMPs" + ] + }, + "Body": "" + } + }, + { + "ID": "4183198b151a3557", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "109" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLWpiZEBnb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "386" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:27 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo361-YkcyyOvd5CL4q3LgTM1Bk4RdkPY3cn-qOm6Dn0vghTnmIE9VKkzGiOnjRNnD7SWXGMqpxaCGvCt6P44D55PqOwAtVTF8PNwmEK9HWCoVXABo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In0=" + } + }, + { + "ID": "e98f53a71d8839c9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1789" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqACfvLjoBUrCN_6ksyMdPlv7yPWQGo1D9u1UiCCgFDRlEwr6NOp18BSxaeTuMIabf2ciNj7_Ba1W6YCz64HlcYyQOoEXNfGYGKcAqGTubXsGHPVk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FrPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In1dfQ==" + } + }, + { + "ID": "28b9a29086758e65", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UozsFdtI43Ltv91E18-CrdRZ4pQDVZXReJOVhpWDu3mxNA7rrWU7hNGl4kPMd0FgcVWDigZA5JBNsYbFV113Ew6Cp2uTsuk0uI1io_mhA6-Lmd2h18" + ] + }, + "Body": "" + } + }, + { + "ID": "8ef9f7b6caefd750", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiZ29waGVyIn0K", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3196" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CIXH3cfx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo97hrh9l4pmrJQqaH8Qu3axsdbEPH2Y8GSEso8-ExKuf4ot8o2uS79ROJcgCtql4vLQJ_4L8q0Z7E9E2WYbbl6vKUYKdQiCI0POisS9o_ViNh9-Yk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlciIsIm5hbWUiOiJnb3BoZXIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTM1MjgzNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS4zNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuMzUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5LjM1MloiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyP2dlbmVyYXRpb249MTU1NjgzNTg2OTM1MjgzNyZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nb3BoZXIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vz
c0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ29waGVyLzE1NTY4MzU4NjkzNTI4MzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlci9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InJ0aDkwUT09IiwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ae3a5920bc8a9c3c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3548" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CPzK+8fx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 
GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrPLZGQqU8j04JqxMmfsqas3HGTlrdx5NM99YR5nCedJYGMSaZ1ynvd2UbUcdwps_gfRo__0XapzHuD3hM_bshff65qlnw_i2cOwp97N2EhfUsZ1X4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS/Qk9C+0YTQtdGA0L7QstC4LzE1NTY4MzU4Njk4NDQ4NjAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5Ljg0NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS44NDRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuODQ0WiIsInNpemUiOiI0IiwibWQ1SGFzaCI6ImpYZC9PRjA5L3NpQlhTRDNTV0FtM0E9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjg/Z2VuZXJhdGlvbj0xNTU2ODM1ODY5ODQ0ODYwJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ItCT0L7RhNC10YDQvtCy0LgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTg0NDg2MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ
2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vJUQwJTkzJUQwJUJFJUQxJTg0JUQwJUI1JUQxJTgwJUQwJUJFJUQwJUIyJUQwJUI4L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoi0JPQvtGE0LXRgNC+0LLQuCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0=" + } + }, + { + "ID": "0104a746dbe20580", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "COKVlMjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqY12miALkWDvx8PHEyL23oxWZlw8Jq7Cn6kS8E8_Tn1tTWWoTuS_pWb5pi9qevCIvqK6aTK56MzwrMuy9axIjpw5yyMZ42MaWK_YFkBr95OnK5IbM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hLzE1NTY4MzU4NzAyNDc2NTAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hIiwibmFtZSI6ImEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMC4yNDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzAuMjQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMwLjI0N1oiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYT9nZW5lcmF0aW9uPTE1NTY4MzU4NzAyNDc2NTAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2EvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2EvMTU1NjgzNTg3MDI0NzY1MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC91c2VyLWFub3RoZXItdGhpbm
dAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0=" + } + }, + { + "ID": "da19af085be0fb47", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "19484" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "CLmmusjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAJr55dqTDkY_jBKB0vAWyE0owtzB_kEoH91_FKTr9C3bHd8fWUiTrsruL5mgwXg2l8gbPiOXad-A9HIJ3Zt_AeXq_p0m-Q8LrHQqJ6sAKsbzV4Zw" + ] + }, + "Body": 
"{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","name":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835870872377","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:30.872Z","updated":"2019-05-02T22:24:30.872Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:30.872Z","size":"4","md5Hash":"jXd/OF09/siBXSD3SWAm3A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?generation=1556835870872377&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLmmusjx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"rth90Q==","etag":"CLmmusjx/eECEAE="}" + } + }, + { + "ID": "96f915707a76c50c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2948" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrQIwi81TFCNpDIZUjs_5tSUFqURLnaBF7g2l1sGMuHCTWZeLyr45VWkcc8fUDYnF-P84QixYQbTzJGuJzOOx7mw1qluYDqoORFGPICfvKmNxrZM3A" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdF
RyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "7a562d6e65ef8a7a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "4785" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ups-3Re4_9njFMmkYwND6BuK5cgxs5tKVRwQ2CGkcks-K0aALbethXTYaEb6fLos6w_FSnM8YkPGM3TETCAoyYpVbQPlotXR9CH3DXngjQNv0yv_ZQ" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiVGhlIG1heGltdW0gb2JqZWN0IGxlbmd0aCBpcyAxMDI0IGNoYXJhY3RlcnMsIGJ1dCBnb3QgYSBuYW1lIHdpdGggMTAyNSBjaGFyYWN0ZXJzOiAnJ2FhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhLi4uJyciLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5pZC5uYW1lLCBtZXNzYWdlPVRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnLCB1bm5hbWVkQXJndW1lbnRzPVthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1UaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJywgcmVhc29uPWludmFsaWQsIHJwY0NvZGU9NDAwfSBUaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJ1xuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5FcnJvckNvbGxlY3Rvci50b0ZhdWx0KEVycm9yQ29sbGVjdG9yLmphdmE6NTQpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5RXJyb3JDb252ZXJ0ZXIudG9GYXVsdChSb3N5RXJyb3JDb252ZXJ0ZXIuamF2YTo2Nylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbGVyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjI1OSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbG
VyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjIzOSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnRocmVhZC5UaHJlYWRUcmFja2VycyRUaHJlYWRUcmFja2luZ1J1bm5hYmxlLnJ1bihUaHJlYWRUcmFja2Vycy5qYXZhOjEyNilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW5JbkNvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6NDUzKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuc2VydmVyLkNvbW1vbk1vZHVsZSRDb250ZXh0Q2FycnlpbmdFeGVjdXRvclNlcnZpY2UkMS5ydW5JbkNvbnRleHQoQ29tbW9uTW9kdWxlLmphdmE6ODAyKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlJDEucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ2MClcblx0YXQgaW8uZ3JwYy5Db250ZXh0LnJ1bihDb250ZXh0LmphdmE6NTY1KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuQ3VycmVudENvbnRleHQucnVuSW5Db250ZXh0KEN1cnJlbnRDb250ZXh0LmphdmE6MjA0KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0Tm9VbnJlZihUcmFjZUNvbnRleHQuamF2YTozMTkpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6MzExKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NTcpXG5cdGF0IGNvbS5nb29nbGUuZ3NlLmludGVybmFsLkRpc3BhdGNoUXVldWVJbXBsJFdvcmtlclRocmVhZC5ydW4oRGlzcGF0Y2hRdWV1ZUltcGwuamF2YTo0MDMpXG4ifV0sImNvZGUiOjQwMCwibWVzc2FnZSI6IlRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnIn19" + } + }, + { + "ID": "239b642e1919a84a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoibmV3XG5saW5lcyJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3270" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urk-XOPoshQkyIHYZp241R2W5lOuLwqPMx606rubzPOaGggM2z_sZO93dYCJHffXuPDOjy64lIxrBXIYW4QT04eM932jPtzPdVd-PFWssV9RYk83JE" + ] + }, + "Body": "eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiRGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJyIsImRlYnVnSW5mbyI6ImNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkZhdWx0OiBJbW11dGFibGVFcnJvckRlZmluaXRpb257YmFzZT1JTlZBTElEX1ZBTFVFLCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLklOVkFMSURfVkFMVUUsIGNyZWF0ZWRCeUJhY2tlbmQ9dHJ1ZSwgZGVidWdNZXNzYWdlPW51bGwsIGVycm9yUHJvdG9Db2RlPUlOVkFMSURfVkFMVUUsIGVycm9yUHJvdG9Eb21haW49Z2RhdGEuQ29yZUVycm9yRG9tYWluLCBmaWx0ZXJlZE1lc3NhZ2U9bnVsbCwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9RGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJywgdW5uYW1lZEFyZ3VtZW50cz1bbmV3XG5saW5lc119LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1EaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IERpc2FsbG93ZWQgdW5pY29kZSBjaGFyYWN0ZXJzIHByZXNlbnQgaW4gb2JqZWN0IG5hbWUgJyduZXdcbmxpbmVzJydcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbG
UuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJEaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnIn19" + } + }, + { + "ID": "3cd795cb0b675f98", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpOXjD9T_c_FRByyyA-CjE753kXZefEzIwQdrNG6OewoHOvKC3Bb9LleziQ6-ymVDg-F1S1eeQeFNJH7nJxDsdA_VmxOjeBzTDB_F3--_1NtwMF6Do" + ] + }, + "Body": "" + } + }, + { + "ID": "361d4541e2cb460a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/a?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpomJFlpfOIJoFvdCmQgD2dRXSREzV1ToI2ntcHi8I9tSyzMCUk0ZrytllkwR_nU8WFZ0YuXVl0Kwsq1EhJL85RlBgCkBNc1P-yVMyNbdt5YrooKok" + ] + }, + "Body": "" + } + }, + { + "ID": "a4e929446e49411d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/%D0%93%D0%BE%D1%84%D0%B5%D1%80%D0%BE%D0%B2%D0%B8?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqmdInqhuXhUxVjtfRvXvHd6Rmo1ODUbeNFFTBW4diuT7HDjzmYnvd4TU6oJPqbck52yFAR99fMx4Tks1TllSUCB4GsMVbBNENfZU6te-hXIB8XyJs" + ] + }, + "Body": "" + } + }, + { + "ID": "89b2ef62a071f7b9", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gopher?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 
GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRgapcSgK1GOswXS37Vynb9BRj0go8bNPOBtrFZ3TDr-nzIdlvKBQuwHtXl4Tlb-mLl-A65zL5Iw3unCEuy6lwtWl5-V7REtcIVHkrWLK5AoLoHqA" + ] + }, + "Body": "" + } + }, + { + "ID": "ca91b2e4ee4555cb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovWW5HSrWbcvQoWGU3c_n64wwcau06tjEM-fzdADFSbmmylG3VLjae6MRNIM4B9gDitCKfjo1_xPQNAZEmMYzRplVmdI4cIrBC8ursFSUywoCSJeU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "eb678345630fe080", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrtPOErXGrnlQtXCV1PhgO5nz80m75Fzs7DoR-y8Ava3jvL9NDJ4L-i8SIV_tktR8VqCvv9uh-X1ltJCj3isa_oY8TlV-OsZeIiESuDrRTYueQWQP0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "d76ba152a62e7dc1", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk5XoKCyt74JBpeU8oREfzh3e0LLtXupBjcWtsFfHNZDEn4DjcmzWo6resyQ1gEisBp_STlU4hVaU3MbTyX3LM443_Gsu4ouZGdf3ZTvTzsLD_zNw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "f785ed7f0490bb6d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrCqpaad9mhxk7VZgTc8xIunote5-AHPp3vYkUbSU8uiVcq65rcnrbtIuCqJ63JTuVJvKw7pFwnLjyn5fL37JzfXLhmLAowAV67_Qqtxxhiy4WQars" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "096d156b863922d6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm5hbWUiOiJjb250ZW50In0K", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoivaSzjVdOm2KP4nXCF8VQDNHXmlNjm9rZwTaWzjvBmH3oK01QYTnHqZ5DhYRewu_1H0KoQgFpUIC-M4OKZoOhRHrq0-H8HLT__SqD1D5cmezrqHo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "0e96d304e9558094", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc-8JwtQprLtec5Ft6q3z4p0ooDaCCLmO9oVgrOXvBpoFhApztj8anfFwEcHdzpB6FBlUFqou0viF_HLHJvHolsxeCCoDOhqyIggOOlWjnSRg8XzY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "b4c8dedc10a69e07", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6ImltYWdlL2pwZWciLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpC3UBVkNENjfRoaL2M1fPRh-iTDTwCsgF8O3TOX_iBuXcDxMjOoTmrIhD4cE5g1s7PObP66TZB1lecFypnLz8hL-aowsOxUMuL0KXJvrgoGxKHUFg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "c45640ec17230f64", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrvqLsOaMe2H6Xh3MyDCOuHmLzU1fn_Zh6kHLkpTpn4EWj3nFkeROu1g-cABD151cTh8YmY798iEku-Q3TnZIMZ5mexmHiCJKdUhAolbSd9b_apMK8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "752ab630b062d61e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY3VzdG9tZXItZW5jcnlwdGlvbiJ9Cg==", + "dG9wIHNlY3JldC4=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaWw6jJMKNOoCNMXwJNVVsxqzPhGfMo-5g3krJ7A8_PSGTx4W2OakfmtDvzOpg1hexLJcsWunNHfyWVXsjBpM58dtQlM6VucQW4Uxndlb9RIp0UhM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "3f4d8fd0e94ca6de", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3425" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoO-NbHUptMgzQPx9zApWGCeqMgfkk2mt6PJl1iWbq8ocUYq_0ud8gPuj9bcBTBOBdqewKzSouddziC-BFLoOdQJ6ElkN136q5y39ZRDN4zq6UWLUs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW
5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "0355971107342253", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryZM2k28xNlFIDnXCH6SJ8RA2lDVNIx-5_eYRExB4-D1-dg_3fWvU3AREwoow4DvlYN4eyzA1AthWs5y0tYZEzvamBoZufkrXPmD64aT8kzAatns8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "8f8c7cba5307c918", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3448" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upnljw7UWMuKFeJUJp_gu-lyfzDRVTwgInddze_9zF5waX4glKKpcRrGpe50cvS8dV_xbbKLQa-CqAMVxlEp7qIHsmTdUR1amjrq0w9U_qrn4gYwSw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuOTIwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLz
E1NTY4MzU4NzUwNTg2ODAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fc766ddbbfcd9d8a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3505" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlTuEeQHkdcuHKu_aWtuYjopXaORKyG9TF597i1ybzIPgJHE_oiKH-xYwG_D2txhF_Sv6Jzfy44Dfga1bINCpPLseDEeD_Ez4rvxx8baTX1uJ7lJo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzYuMjI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNj
MzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJyME5Hcmc9PSIsImV0YWciOiJDUGpudWNyeC9lRUNFQU09IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fb1e502029272329", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urm5pF1ghApp-RiVg_TAipd-LdTbF-hcDPcVRYbmj7KUIQxnsMUF5WOcrMA1t7ZltdvPZhyfsELISuH6DGJlUUedaCNH-B5j580GjBYLUKCwmAADeM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "c151ba4fab0c6499", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + 
"text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "\"-CPjnucrx/eECEAM=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:35 GMT" + ], + "X-Goog-Generation": [ + "1556835875058680" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "3" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur68mIGQ8VAtTRZs4F6ixZZYKcqe1oTqWHWqmp7gNF-81XKf2xX6eS4PcegbKx7bYnb4i6dzTCoLlbaJRNHwwLVzNbYMkGNy_bTHbWYTgtlBSKVtWs" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "e09c6326cc60ad82", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqldLQvGKuu2fihIdwh7CvU7W0yE3vUXu8jqefTPSMhlGgvR1EfeYFqjk_Zxj7fEpv7AjugvHnn1DmkJxJWhrZyM8b5gS4uJID92D_018h2YHH1r30" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "6c83b36cc6fc482c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Expires": [ + 
"Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGOUzu40Jq-_GztkQxoFN64MHcSoZjph7ivV8tqrp6ENxFIB0c82xqEdKKfcpqbJ2YXtXQgwflZgX-uiVOgGY_1c8SLoV7geQMfErTz1Q0NDM4YDM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NzEwODYxNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNy4xMDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzcuMTA4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM3LjEwOFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzcxMDg2MTQmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzcxMDg2MTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0c
HM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "e499f3b7077fb222", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Etag": [ + "\"c7058d15ad157573e6940c2b95c00972\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:37 GMT" + ], + "X-Goog-Generation": [ + "1556835877108614" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfEf1WUM8E5dSmOe1sOfgheagnlrcFUolvMMBiP6jDvPqoS7Sx0IwklM-DwnuAbBJPQ_EHhX-42njQu9vUjXqamH1U6np0Vmi0BngqFYOTc9MVP3M" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "9e67dfa901c4e619", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYPZ-j39Sr1Whn6F3PYeVaxqIIjS_fBgSNkBO7f7B8RMWih6iCCLf1cPDXB3Br-0XBkdm6i9N9fGeK84_laVYWuJc0uJwSCXydk0bSWezw0qec5yE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "670f74ac2a0b734e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, 
must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWwxnT2DHS1ymBhZJmnwxQiajAVYbOH62dyhX86syLNW_TlPnzYaM_7kACpN8ar5EeAHQ9fsMSHP4CqMcb6ZWg5KY3OkbPvfitdLalAA9MXxMFqoo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODE4MzI1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC4xODJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguMTgyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4LjE4MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzgxODMyNTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTlBDK012eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIi
wiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzgxODMyNTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTlBDK012eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkZuQnZmUTFkRHN5UzhrSEQrYUI2SEhJZ2xEb1E1SW03V1lEbTNYWVRHclE9In19fQ==" + } + }, + { + "ID": "9c6e7ad2d8e2c68a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoSrQtDkljyQ-aUJoC8m6nY20wZiHLTwh4znEZ5hyKxLQUrZIi7PnU416twfTwfPXWvd4cPGZC3NVdpJWg332KnUKSEdFPY2BQwLJ-_9S5eVdwMrf8" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "b5fbec9f26148f95", + "Request": { + 
"Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Etag": [ + "\"-CNPC+Mvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:38 GMT" + ], + "X-Goog-Generation": [ + "1556835878183251" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTr8Qws5nL-el_ied6M7FYtryiLpFfhGNN-TxakIl1FCAOBJwMeq6_KBQ0l4UmT8VePQQ9h_S8r55NolBB1Ex12-iKvooCkTsNmvQ4rE4hOqQGbLc" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "616de25561ebbffa", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" 
+ ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyD8XYw6tgOBbcxdUIVAw_vXrrryw0bgh-L-jRW0hyJt0lCHU9K26isn6tykgml7UgX52dgw65xD_ht4yxbHkY_VFl6MkEY6H-mWRx0_52xj49RW8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODk5ODUxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC45OThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguOTk4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4Ljk5OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4Nzg5OTg1MTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4Nzg5OTg1MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24
tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkgrTG1uWGhSb2VJNlRNVzVic1Y2SHlVazZweUdjMklNYnFZYkFYQmNwczA9In19fQ==" + } + }, + { + "ID": "2ddf1402c6efc3a8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13334" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrFitgTFIY6yt8kk3zprPGtWncWc7Ad6oUFVXVdE0O3QqK0qlYGB7RSw-dwN1AvMk4Sndi8gR22cF8qs88ZlmdAxB-1zYdUNqwArmpP2xni66xX-L4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object 
(go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "eea75f5f6c3c7e89", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "911" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Etag": [ + "CPDp3szx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uotyvz71ZfU1J_hl3NsDz9ldw92nWlIp9muxpBUsdv0nTyfJEqz-yQFyEwE2UM11wv7tkpLQfffGlKhlx7CVK0S1UYJnS2XQu2P0Uiz5bbamHxE520" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTMvMTU1NjgzNTg3OTg1OTQ0MCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMyIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3OTg1OTQ0MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOS44NTlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzkuODU5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM5Ljg1OVoiLCJzaXplIjoiMjIiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0zP2dlbmVyYXRpb249MTU1NjgzNTg3OTg1OTQ0MCZhbHQ9bWVkaWEiLCJjcmMzMmMiOiI1ajF5cGc9PSIsImNvbXBvbmVudENvdW50IjoyLCJldGFnIjoiQ1BEcDNzengvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "b19fb82ecf450b51", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcj3TJ6B5h4Q0DT7G_ioI_Icu9iv65m57OlRH6h9mKD9zZZl320H3QD6A7fNd1UPBSGGDZ9I0TG4_lSFa8JgkJQEVRGVRvVQv36b7ArCuGmg0loV0" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "1f0a12bd00a88965", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "22" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Etag": [ + "\"-CPDp3szx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "2" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:39 GMT" + ], + "X-Goog-Generation": [ + "1556835879859440" + ], + "X-Goog-Hash": [ + "crc32c=5j1ypg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "22" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7DoEkjdrkGGrYpsCDbd2Y6c_Mu8sEA7cN_k06l4WjY25UJsijDFWSlSIA9SCi-ouLuI3if4Rh33a5n5vKcN3oSYjzK92eSPVCczEpQdILKFWyQh4" + ] + }, + "Body": "dG9wIHNlY3JldC50b3Agc2VjcmV0Lg==" + } + }, + { + "ID": "f649a81672a92823", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrfhqrYGhW8ew5dxbfg_DcMhaJh6k2oY_JqMwPRSDtS3Ef5kljeo8oTONrxRIAP8I0ScqxFCy7okNejkqOeO4Vr1TBZ2mc4MDjwiJA52ERSgZMUyvM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MDcxNzUwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MC43MTdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDAuNzE3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQwLjcxN1oiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4ODA3MTc1MDYmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4ODA3MTc1MDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5Nj
AxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "61763fd57e906039", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "129" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiJ9XX0K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13444" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7TRBQyfrSgWnsPL0CVfQCd5s_QJN9pRUHB1YttIoObh1YkzCEtuRxrwyKYtccpyodUCMmTVRyD6d0oDy-c2_Q7GK-_6OC5h_EF_1e9-t6E8VZCQ8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceNotEncryptedWithCustomerEncryptionKey","message":"The target object is not encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.encryptionKey, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=entity.encryptionKey, message=The target object is not encrypted by a customer-supplied encryption key., reason=resourceNotEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is not encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is not encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "b45cb452c78d7b50", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq2YnotZtU1JMnJw10vtgVDeukiWK_4DXx0QFWA91CaCYLPXLDKzCzY8xqb6EGkVxvw731As27REu_hYOqZTwSfJJ6SeHemliLV9JgQYjfngxL0HFA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOC44ODJaIiwibWV0YWdlbmVyYXRpb24iOiIxMCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBbz0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FvPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQW89In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQW89In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7Im5ldyI6Im5ldyIsImwxIjoidjIifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJld
GFnIjoiQ0FvPSJ9" + } + }, + { + "ID": "6831287fd0edbcd4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYyJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrXGhYaqbrodjXeCKtZGAt0hM0GrM8GBcZBkGkLJ7550-Iqaxp681SkhDDDOp0V3-xMmoq3rjWOC8A4XvJfAzgxGsO2VekCUJgUBMalL8hEAur49q8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDA
zNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "7588cdc84e3976f4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGBHQWnSHLUYlW5nck2w_3QN4x5uYmOM-GIatNzD1tqhzK0JOf7rBe1BKILpdyEUPlSmrLwrukcqJwHgfezE50OQL7ha9n6_fc6qUMoVwkEGawOgA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "974ed1b42608e806", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "34" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCtuc_GsAj6sf3zXpqT2_KZrdL0SEwsunv07k0DaaEHMj8gkX4kRCcm5r8AUrD9RNYjoKeceiCyY4pDN8nrHljshmEIXF2P29Oyd0KSuqDjsNeps" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMyIsIm9iamVjdFNpemUiOiIzIiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgyNzYwNjA3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYyIsIm5hbWUiOiJwb3NjIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDIuNzYwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQyLjc2MFoiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Mi43NjBaIiwic2l6ZSI6IjMiLCJtZDVIYXNoIjoickwwWTIwekMrRnp0NzJWUHpNU2syQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2M/Z2VuZXJhdGlvbj0xNTU2ODM1ODgyNzYwNjA3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODI3NjA2MDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "a2be30bf1e3cb539", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYzIiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==", + "eHh4" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CILrp87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqziYHpTkcbyMFRtHko_e04Rze8FHLbALw476U2bB9k_SqPwILBzxKRLzFvRN4q3RLt4xR1mpR2lgJ1Zq0BNYCefLOZsOeq4JbzJYLxGL5V799xkQ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIiLCJuYW1lIjoicG9zYzIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My4xNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuMTUyWiIsInN0b3JhZ2VDbGFzcyI6Ik1VTFRJX1JFR0lPTkFMIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjE1MloiLCJzaXplIjoiMyIsIm1kNUhhc2giOiI5V0dxOXU4TDhVMUNDTHRHcE15enJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzI/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzMTUyNzcwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS
9vL3Bvc2MyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMTdxQUJRPT0iLCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "d570e13411ade628", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMifQo=", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3336" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CPCDx87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrltsLp6xgl5GQBA_ZoqyMKcRlP-MvWyo0epRXUAbOAkOUUpAOgezp4fFQRP5wEM1fq771AWUgtYvQ3HLXCnwxmDaCqJxEhICiYhxDp8RpvCnn3xuI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My42NjNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuNjYzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjY2M1oiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnM/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzNjYzODU2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRJbkNvcHlBdHRycy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NT
YvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "284a06aa5d3f4c0f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2972" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqEuzT5vUHzBcQTT84B5lbxQRZ8Wazbvf7pARGdim0OKOWWM6MdR9KrH11f9w9bxrybc2YHzoveHnAwpEgFzwUcXlLN8xDttCt9pwOnPMX3My8utXg" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkuZGVzdGluYXRpb25fcmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LmRlc3RpbmF0aW9uX3Jlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW
5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "631a4dc25c1479df", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBK3c9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CIes587x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up5pzFgsk-trM6xfNGHCutAafrBhKStla4toQDjvEsTPe4TTesYnwc0KLg9WK95RXOKqdm_KUngd4hv6Tucfns_MALlJyx1s6A2cZR3vco6jKPTW00" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC4xOTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuMTkyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjE5MloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0MTkzMjg3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "e60d42eeeafea205", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBL0E9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3301" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ9QOOcNdGntWr097PRNnaDFbt2xarczbyHRwkwxSiwRZIhV26iZGbh5vHIpvl3Z5Dxc7hjtWuutHTMn8jpCTEowLJNAre2A6mZxVNtV52Vh6XDX4" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W2NIK0EvQT09XX0sIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHJlYXNvbj1pbnZhbGlkLCBycGNDb2RlPTQwMH0gUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi5cblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG
5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJQcm92aWRlZCBDUkMzMkMgXCJjSCtBL0E9PVwiIGRvZXNuJ3QgbWF0Y2ggY2FsY3VsYXRlZCBDUkMzMkMgXCJjSCtBK3c9PVwiLiJ9fQ==" + } + }, + { + "ID": "5a293e6162b30ef3", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiaGFzaGVzT25VcGxvYWQtMSJ9Cg==", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CJuDg8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgpXYBQOYHYXTQAVb7IGna0BRqHaB7oBdZ19frcgAVGErLbEHe3bESKZ7zKSO6r0AtvqwfCEuJty95EeD5MkwqIsXbr88YcTG0j7M-g4yTKkejukI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjY0NloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0NjQ2ODExJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "19f19648f1196c10", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CKDem8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAmKuvPxUsjTHDB5nMuNG92dU0gql0Yol2zlvPDWEDuXHQRLhOJzYBAH207Ot-_IB22pH5QSfTaTayqEWBUfkobCR9YGVa79W0LG4fv9fnAjxk5Cs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NS4wNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDUuMDUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ1LjA1MVoiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg1MDUxNjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "880416f9891fc25f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3515" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFC5_DRlr9xktR_eWpfuFqiZRbenVOATtUEg3tfJC8JbCqagKQzn0_sYLvxvG52ordbl2Nwo0L6Y0AbwuenLAo1uoX4uzHnro-JiY8MkdQEsO3uSA" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5tZDVfaGFzaF9iYXNlNjQsIG1lc3NhZ2U9UHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W292WmpHbGNYUEppR09BZktGYkpsMVE9PV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UubWQ1X2hhc2hfYmFzZTY0LCBtZXNzYWdlPVByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IFByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci
5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4ifX0=" + } + }, + { + "ID": "e1a7bef7c2f0f7b9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "341" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDioJ0fktjtyI_mABE4-oH9KqDlurM5mWiYDhoDp_LRJYSPvDxjuoaYzNhjoTHdHPALySTzCxScTstm27R-praR27EAm4Gx9k3wHAAwKJUptqRnFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfV0sImV0YWciOiJDQW89In0=" + } + }, + { + "ID": "df5371a2b0cd8c3b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "317" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJiaW5kaW5ncyI6W3sibWVtYmVycyI6WyJwcm9qZWN0RWRpdG9yOmRla2xlcmstc2FuZGJveCIsInByb2plY3RPd25lcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0T3duZXIifSx7Im1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIn0seyJtZW1iZXJzIjpbInByb2plY3RWaWV3ZXI6ZGVrbGVyay1zYW5kYm94Il0sInJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciJ9XSwiZXRhZyI6IkNBbz0ifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAVgetrpv1xjtKv2B7KFBdnbg20V-btiRn3sD4kQF7shWIQ1-FqJMRsbYfcYJyTMQHv_BhW_eVzvcc56z0LOcYfE-wbCRZpqYjen6c-bVcMzPet2A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "feaa4b1a4dda0450", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwMVZVQQ8s-ZBUJDLKyC-qMMTANncoBhRQWlMCITVUXQrTnFGGxu4GdKyVcuudrSa-DWy5w5iLE-i4a407GopUNma3xIq1eNiyVAziPD7jQ4YluzA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "23d94c8a09c0c334", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam/testPermissions?alt=json\u0026permissions=storage.buckets.get\u0026permissions=storage.buckets.delete\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "108" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoR-XKWu9nOWmFZve2-NeDyDvZDo5rYpPD3avVEmZkZ-lODvHCSR0D7zdPeCa61L5EtOIYJBqmC0D9Xc229A1GDS5d91vgAGuzRoxRnNa9DjBw68xM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSN0ZXN0SWFtUGVybWlzc2lvbnNSZXNwb25zZSIsInBlcm1pc3Npb25zIjpbInN0b3JhZ2UuYnVja2V0cy5nZXQiLCJzdG9yYWdlLmJ1Y2tldHMuZGVsZXRlIl19" + } + }, + { + "ID": "3d0a13fe961ed15c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "518" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIRjhruHo5rweyX35Zt-Mzm5UDD5vr4319gSNjtnJHD2RWtVLvQdjuZ0jv-XKT3s1xcJO7CvoU-qAehCknukI5ssvv2LgwazyhnkcuFws3isqM9uo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ni44MDNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImJpbGxpbmciOnsicmVxdWVzdGVyUGF5cyI6dHJ1ZX0sImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "c1f385994f4db8e3", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/user-integration%40gcloud-golang-firestore-tests.iam.gserviceaccount.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "159" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "589" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozEPu-BKQnmBUxTwsOWB7mmIPC5lppmUt6lA150OiXgQttSVRetVysUp299p7PbjI08qchUQl2idgMbfBCScDL5SuoHi_u9ani0DXYX2OV9QXAovQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "ccb3141a79c55333", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoG9XpooOFPlHtC8fg7llzYYETQUMEm0PvG4TbxBF5KdfCdB_sxNi0Yy9KrAmMDPBVyq0Iuaq7mzQOhQe9lXe5PBOc8-CGTXZiEMkMMJe3OYxjExIM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlv
bkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "2d4244737318f2d5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn_UdEXhLLXE1QsYjwtyqQlzX3nDvpAO1WNpRkHhyV39RJKkxMBrcd8f6Xo3VZFDps7iKuQHnW49yRqjh42062lkisOH7otDXrdAKAih4Iz9fzERM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcn
MifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b63c3951975a766e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq-LP6kQLHNpjT5jYVkPzNXyfr838sGr4jGMVPq-FOq01ayCWvyWVAsvUbxp0NSdOYJcW-z0i2NkQLuSKGQYrX_3z8LW1Vb4uL2jxfC8kSiuz28hkA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "0764f2598b67c664", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGddSv5GSaZB7qBmb0PigbMbJmWs91JB0W99oOTcbMeUk9QeBf-7QF7W_BpAn3wNxqVD5cBDPgGuKrZCfYv77SgCGxcPW-jrXsUY5h50q8eFL0_3o" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6I
nByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "bdb0ff507e4df251", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8t6bOWiJucB8KpVlIp8WjWLSUKssQaynZNguVy_oBM2ggBePUFnoteMcbw_L9aSHoD4hwi91dMaBC4aVc6VuaMSBSVuCe0L_0IymxhBIx3SRj_As" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "d2e66ae2cc0d11bc", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "COq52NHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqQ_qlqWPPh6t8-gwPlD5zRB96zsmFgzyez4LG3XN4uErivnyHcCeeHje_i95VLJcWYXn7_-knEn0faPBCCAIStf4fXGs_Lz_VzHK6CLBZjU-oBxcU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0NiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDI0MDc0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC4yNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuMjQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjI0MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDI0MDc0NiZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTAyNDA3NDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIj
oiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "89b2bc9232a865a4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "CJTz9tHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrymnpCwS6vZ0kM33ZD4nBupWs9hAuaOGpwiNcS4GJctgaLPe9CQ0Ada06yQV1aEfh9FtX6lgYh9jHwbsT6qdgOYOYe9yGDdAF9f91aE4y4VUHQHts" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDczOTYwNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC43MzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuNzM5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjczOVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDczOTYwNCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTA3Mzk2MDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2
VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a70634faba29225b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsIKUUc6WyIHUlDvAIDlXVeOofVG4sVV3Rus8ktfPMTUTI7e4PH4657AoDnNOKKy9TOgt8yUtUvCNvDFQC1xwVApFofRhWT3kcjsEYRgZPCDDsUqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5539336327f2e0ba", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrH7pn9r8O1Zu1Nc2aNUO_CO75MyvOJyONGw27TUkfWRSSmFIWJUZ6F3e3OprVF5jKtAkQ73puZbyNCK6mozyHrmPRRa0mtyuBPhkYg4DzPGkXJOSU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0Iiw
idGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "029ec3d54709e7ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqcpItVCw5LaIWvDckOjfoChIifJGtuURBAmgtpzzna_iVOfDSaQOhxiMDVfYkV3mKG7__z0WTNRdVpa2Zs2IPcIAKwqwSeIMaLtz--MluSCHv9kc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "bc0d0ed808bc8e68", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + 
"X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpljfpVGJewuMZUyJZ8DU7j2k6JmfPqXXgkeeu8vGdK5j-C_DUgpubS4nFEp1z8EsN-fUdceCxfiy-FGIJiFpME38hWnztW_WIn6zeSh6LCbwIK9oM" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "b695481e00d2d6d9", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "deklerk-sandbox" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmYf7b7ig2ase_O8qHEsgSAXifAlxVdDy_Zh5Qaqx1IlL1wkTvy1BSblI6ZDz3M2X-Y6KrdJp1IaP6nDU1F_Yhyt84Y7dOG80r2g-x4E7NAyyjV0o" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "2fe3b6cc96ffa451", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "266" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-S2G_BUo7MzXUj-7vUDuVFWfBIA5GJYfTdaeruyelzRGFimnhov8wRB_ozWC2cGPQ-a2xQk8bw_cVV_D2Q7c7rdR1ujaTM3oIjr8vjsJBZXa1VeA" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RNaXNzaW5nPC9Db2RlPjxNZXNzYWdlPkJ1Y2tldCBpcyBhIHJlcXVlc3RlciBwYXlzIGJ1Y2tldCBidXQgbm8gdXNlciBwcm9qZWN0IHByb3ZpZGVkLjwvTWVzc2FnZT48RGV0YWlscz5CdWNrZXQgaXMgUmVxdWVzdGVyIFBheXMgYnVja2V0IGJ1dCBubyBiaWxsaW5nIHByb2plY3QgaWQgcHJvdmlkZWQgZm9yIG5vbi1vd25lci48L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "59bdcad91d723d0e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "gcloud-golang-firestore-tests" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3lrNfk5bvup2RRbcsT6CeDACQitaiF3BTMyQ7B23ACtTqwWweq9ooOfZn3CX7tsbzljhlY_009nsTXXQwm7V_xNNd8FZwT44CzSpul-SZIRODC2w" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "7b7b6a79cd2a53e3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "veener-jba" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "342" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpT7Ge_QpcDrqajg6a4kZrmoA55ZSma9IM3pyz0Ow1bu5nLURc0V7cAThbvikg47_O-ox0uMdF6zBbtVtKL7olyMUGd9wqCQ9ubELVoJXiNMDehvXY" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RBY2Nlc3NEZW5pZWQ8L0NvZGU+PE1lc3NhZ2U+UmVxdWVzdGVyIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBwZXJtaXNzaW9ucyBvbiB1c2VyIHByb2plY3QuPC9NZXNzYWdlPjxEZXRhaWxzPmludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBhY2Nlc3MgdG8gcHJvamVjdCA2NDIwODA5MTgxMDEuPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "914bca19df35cbed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM7lBsoxgNb-rh81Lfe0tWFGTkXQYfEB12ofkKZ8SK8kK5bPRUsQMVhm3aYB2N9e5_QSBZA25my-t5LpgR8TVW1lNTGd97OoD7bdBhP_CGo3xMbos" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "164b4ffce0c7e233", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqbRkPvid4Zo9mQsbMXlTlOQ1womp_CnHUJcNWRamG_lp4ABhgVPVOdL8lE11J3tmvVkYhzcEPpIJuA_RTvuAbssXFsNo9Fu14cQ44HwhSRYk7r_dU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "d04d3cb2ac9d6376", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrcH6PdfDpm0th9SnZgYwMYIiYGYz2VvNs-Nb0VYFbZdA4QXfOJiRFg-xKqmDq09HRt7j8OwTDO7UMm2EiqwjPz7j_JPIvfcCzfCnVVoF3XiZFomuQ" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8b3aeb1f423d76c5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoX_zzYo9AYFb44YmIz49k5z8v8ITzEQsDrSxEE50-7AC-ndg18Yo5DFAZ9ZFCDtT5P7kch3c0-YlkhX-5oYCUIm-mmPmoTg0CKWVI0TJ9la9z9-vE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmly
ZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "f32edc9a9263c0a1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok5lyOpE3YUZDwRz_bsA-E-xzWeqaApyadumOOFoA_YmOi_xnvFaJr6gfp9Gaktbpud9mv8qDCIvq1Yu7CyAVY0ScanSfNCke3naR_G7kPKYgrywY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to 
project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "da73739950dbfa82", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdpofRnchjkCQz5LLoghzg-RHoGATH3xbgJVW0LKGBkH025w5EB5RlGmHHnmXuPtK_5Ffyl1bxjmoc_h7_vGvjcxWbHv9-NKe9fLO_oNYRmw-ffvU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTQuNzM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoi
Q09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9" + } + }, + { + "ID": "94e697431c9323b9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqhr0guIL00afW_qpaRvxEGRSJEWTtfrZP2Tv8Vqt2xas1ivvvMHVbSpT12ltU7FjytFhno9SFXTS8pW1S0S8qJdFctUCa83tkcQRXgN1uPUwhX1ws" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuMTMwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNT
AyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9" + } + }, + { + "ID": "3c5d694690a75054", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12375" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UogsU3nh3dfRYWq9YqTmJc7CfCX1U0b-IrWpDCM7M4YwfuUWgzzzcydWdHLrF0UPqRxNFQj3eMHBbDuHYcF0xuUNah4g5J_nt26feHRp-moh7uFXNw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat 
io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "e050063f79f64820", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq4hs_SY8_2HHPHu8NfVIkOdwmZz7h6XH0nWRSTQJSqzQZQGvv1p83Z6W209h3bXsbN9_ESiU3OIyLRwmEldfBP94hRhN5t1JnagcKPcMEOv8FtvHs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuODI0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9
LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9" + } + }, + { + "ID": "f7edef926cdb7d75", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13231" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyQ9DLrBDGkx7uOkrIKrzMMYA4YygdFsd1si16n2Lr4I5Rfp1lxwadC3jThiJbE-cDyuZATHtvM2bzpsRDPJzbmKgtxYydHykiB-oCsSFqfwy50gw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "2745d195a40b86f1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" 
+ ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGeWJRYTNsu57knuRFhwHuoeUMIJhUtuQj4PLhGDoFcITB2Y--7JgaOCVhepJ6a5RRKvU9d3UrAyrtbGNOVHyW2WAbN_KpfkiFhYq8l7HPERpLDjg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5258a70437064146", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqImoOCe_iDYCV8XHpaaEXPn-jeAf0M1MyixGju1tNCSTBIMEKDzh_wxVxapMmXWPnOes7YX0T0Fr663x7eJCl85B0EGXggUgp8bqeDV3cJ-mqeDrY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5cef2c464b7356c8", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + 
"StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpH5br-AVWN2g1nkkM_FJWZIjgOgh2NGLuiyYUS7QD2DSs6B-c0IQHgC2NUyy0eZq4McQhwxBGhUJ05-WeD_bX35i7skN7Jn_q3ELAaDwpi6_x3Z8c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "11cd5140a264e423", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", 
+ "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2_tsxxuZC3Ni-A0zL0E9w8OVne0sWfLi61Cv48eTWj2R4F6G0AmXZU7J8L-WnDJf4s-6GpZV0SyQqrjBA_Mp75pJfWb6cl9_7oimTy8HPnxAtMwc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "dac6629e58d45469", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrTXUzCa6WOvWD6NqoHhkdwRtG0Z4h0QurHOuKU1gyAWzXOt8lc1NVsLqe-6m8siE76k7l8g0EY3Le273_4GFB81gHMhM4qBOOgbwSBgLl4zKHZspU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have 
serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "1c8663192ed534e8", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": 
[ + "AEnB2Uqfc2TzyVBAwGuVpWWQt7OYr3IQEmVj5R7LIr5LSRAhwFernkoD6TFVnJno_ria4LL8P9tSQgzCF7uJpStJTpYgTOiW5FqTxQ-AgbOjFmAMjah3o6U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "51858ae7ea77845a", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqc5V7vaPL-d4aMLJi5QXyxZYWWtGBx0QuvfeB8rFlhwPGPFLQUABxm_XXkR0AOeIqdZ-intSu7I5srWdFKX85aY8r-z4oRTuB-CjGUe5uMiXQzXU4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aW
NlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "8248d1314da9a068", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqrg4CYWnZtCkMXT5KHCcQXY-aSGc6JuuEoVkTtKgObB9qr-dE-5sMKCrbTfpFudhid3ulDpkNuOUmRmBy4k7QQoRxqgues_a8dB85cOybZKZTM864" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8c1628782bcf7eaf", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uqmer5wTHG4zziRJUR_QqUbX_xgytHZkMg2KFntbVz8pn55RFN7idAyYcqz3AhqthLD-bHH2Lggdg4MuIdjHVk_kKHxvDdqI0p814avmZPYAhJdvsE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "6813d64060667ba0", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsvYC9PE_CO5H9MncFfA0uYzQqFBOqu0_zdexcxDTJ22CO_vv1LiRY6ZSC49_FMeUZBA3KdMfOnS7DsSxBpqrLC9ZM6GAs2sFolmZKhp0RWJjvOzs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "76ac0290041d5a26", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" 
+ ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-ejQ-G_PN4CMllngdiEh1SqHQNy8TXBA-htpazVSOadp1oThRsYsQuflX2kwkDQHFpeo1UgDPXqi5BRq8qZUBsM2koJIMOp-APWnB8T_qsTHlY30" + ] + }, + "Body": "" + } + }, + { + "ID": "0de0d173f3f5c2cc", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLOtWcLW6yZk8n72e3qkdl4ft2-s7FJFYyRv_5REkk9Q6d7qk6Pcpcy_HMCJFgKTV5RZzzIU3k5hceknIV1XNGVWK7gOCFimyeb8sFjNZC7y7rypI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "eb2af5bce5b9e1df", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqgaldc1OJvgmfZxcZs4i5d6EXMyFK93ZUJLzSK8Vo-qPYKRwhvPzK8GFv3gqrdyjBbvdc2Uz57nCwy10LhC5AD2pscln8VbaQIOAHqxvkwsqV27Rw" + ] + }, + "Body": 
"{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "ad55ad01147a8c62", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoQpkkPNbb1CGY8lAzUfymkmZ7PNlRK6hK3deBeoMip13ARTb212DIgf4lgUDM8PVQsAqcnKwzNulbZICoGzHwrsBZfyi7p5TGX2yJHLXSAGEY0MBE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "1ef9ea9b9bc398a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur5WrZwygTHM0dDH6L1kypZzaAQtCwxt9wA7VXvgnjwrfwdRnTw0D7K3VLYLRTPDcf8mRFn5CetkcimFxfqUDoWLMF7bH1TLiBr38K6aBU74aRWUd4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5c1cadcffc6c95e0", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo_n__u-lZKgZqveqge9YBw7wQMldCd4t0io7l_L_nlcF4DSPBPvZd9jGPCstE1EYuQS9L-Z3Y53Q15FlqM1IcmRDv6bYEJQWqkpuzCRRa_TLh9P7A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "4a615256ab8c55b4", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpwGIlBhvugAIHxiObS0RPpJ40OY_R3exJfPh5IHLLB3SrdYtGMpD01QLdxc3E0IPGkBiTrwM-Z2X1gMcxuOqJWJUrhV4xTFe_n5G8bBxzievz39F4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "63c0acd95aa571a1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqNu5DFt-lpr-_4YX8LRIhoi3KVRYVgFAlG7WOPsY7IGJ2NLZSF3SQhmkvfL_IKZ0FWIwx1nGT6HBBe9t2RYXNE1kXis-9KgEaAWhrEByTud5QGx5c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "3c6b1be88134f9d9", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": 
[ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqzTVWCTbjlM7VCFWFbOojrtt8ZMY9bRy0eXj0Uxj3gaYalvwqR0WBrtbX0iGQ-aN5pv3-Acc7EWIoHBa37YbMpx9fuoJUpTswmdpiCOH3wbMN7tIk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "60cde48b740e9a63", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrLyiKgPsp1RDgH3Q2SGlQAwTvGJwVDvb9U9FfvTysIFo6sO-5AoXrGPQfZpr80aMOEKojmPksURjhsteWSV9gdPsOlPIJMorxvQMHeomMCh-Bgmuo" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "95b915570e7c2375", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UrOZjNI5VbLPTLuBwhSAdcgxvqgcFpH-ED4XyEgIbnndmtC_zJ0lkLfudymSRpdRE4NBxn1Vp-AmZnb1ONsP2I1GBnLYfpYRbzFQpmihPrgBkDnaB4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "e4170c637b705043", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqXQonYrNQZxcXoDllGz19Ruu-Eq1_3FVjMndISB0h1O1ojodwFr2Xor2nZHBZgv3NN_YOSrunnT_KYTfBFNdqtVvqVC2L19Qhwk-cxse_FqrMVbIM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "a3918deffd2f8f0f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": 
{ + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpCCE2tr_4iWFlwmQkFVvzfskSWvZdvohQYV52oXF-tWUfie6Brp5TYrvUYx7jH8zsTg9qqk_kv0th8pOA1WsHahBUbDYuaqAmW0EpSJI6_xjBPdZ8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "42a941620757dac5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uoqov2civPKG8oSKmpVgUSoOD4W3rev4av_O6jgxkc5tSpPMgtxZeG3NznVRuFJLgClao2zPH8BvZIxTAAPUi_R-H0zgPz6ou8k9SMzLrcgW_HW100" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBVT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBVT0ifV19" + } + }, + { + "ID": "ae79f2e22d58681a", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqP_EhJin_8uqVkiSwAv0jBAI8A0o7nSwNAQ-Z_EdAnq2PItIGJZl5NOE_xcDTFce_ncHfsf6LvV4NMvuxiOsl32BOddZj_6x5dC36QqrKvhXKaQJ0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat 
com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: 
integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "99becaf4890b3c21", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": 
[ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpWMu40YNkuE3QcNByTmrKw_cnq4a8DVtcknBJ7gTnLdH1BC6CqoFOStPpa6y4PMmdpLpquozlOzvI4NWjgZYp1d9F_wBEyaq-wquTQAWGXNy3qktY" + ] + }, + "Body": "" + } + }, + { + "ID": "586ccc26d5a22712", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsRO9GedosQ-kg2lCQpx2S6C0-HH592CdB3SU9yGMwEEGKX9md7KGeSDhecrm_6Ug6tyYtEMlsMq08YBAKGprXfcN9130mRolu_HVey3CcrXK1nZ0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "fad4bdab588e0f3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUefKoF0asUvxzmAtX5z5m_5FRy9UfliKkjQN8cYyrHAotGwiQA72QN0lYfA0HXbe7gFRokwBAY8R5OpHcEdRq5K9sw8AwHuNtruFtBMxjN0D3hR4" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "23bb9ce53da2f525", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Upa2KE5JJOXq2FOn7RHasJpOeeHqWU2kk3BC4cQcGQ9jGzh6XIksN6Z6u1NVoCbxW7-3zGiZMdAop-oa_EuyPAauD__JgzNPdngeNeXO7lsGaVI6i8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "46482e47e655836d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Urze42blCAS2Dskbf5jkbCkdo5IhfOjcTlcufV1ooZd5x_QD0zK1gRoXLXfvLjKHEKaJ7cuDeSRmNjuqgEoxJFMmXUoPOIetWLYbH0G-lJcM6tydew" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "856a1fb5ffdb000f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": 
[ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFSnYuPlv_JejZ0nEjoWRXeLwj0MS-ddTlIaSOjaK5BudyIcIOeVDEeyFYWsF-GCZzMC0F7v-UEw0_lbsYBXCMZcLp6WZu_s3OPDREhpz2s3Txw-s" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "a74c3e1b541a9ffc", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ury3MiMZho3xaSHmFgwHNvKIHTa0_gZzfKvX8FW6iVGNkdSbXvmVypIUuIoKXDjd4rOXpFghdoJlES9A_iXUvZyfWAZLvWqR1w6sDRymrIARVd0dPE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "913da4fa24ea5f1f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKAxObyNBS_Mew44t3k1u80cLBGQW_I2ohrF96rIjZRxFscjGFrvsOHfLxw78rTRnpHmfA0uXnNz8T2FPc_Op16Lnn1YkfyOezSEhDBx11qZbinEE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5dbdf24cae75e565", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoJgRZ665WjiUif7SnQ-Vg4B0kJl-UQ_kIk5XgmcQIL5zmCBp3NCxjksybkvNKpGcQnIJSLevfyWd2fugRv5y5vxmEBgqFffIy8ibTIyBDH4b7R74" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "f41892a142fe6d2b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpThInLTHhtepE7J4k-Rg6OoLRyOqnjPz4sZmfIH6wp5TSIyLrZQhExKAP52XbwUwMYp7x-xypSMY5kfR66I-dmpRWaFkfs0Lhy-Z6kqb5JUXSppqk" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat 
com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5eb0710be2571dab", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpbetUCGwYg4rhIR-DP3ntPqbkFwSqutNltVR55I4qedP-rZ5hC7ospMy6hZ4aQaNOhNv0frmst8MQZ0j6Mp8BcTC6qy7mU8bhEAiMGLycR5XxUWfI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzd
C0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "56aaafb0a24b0644", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqQczDm-dzjgTlLFcJ2lXQrMTpQ_8UP-1hNbGj7eZEgZcbxWA-oyXqEe4tJXq3gYdZ3mEIcAzaDtNeBHWidpIev7kdXWank04_JSWbQtU2UomJqeU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJi
dWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "3bbc40d778939612", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5kJWpLwxz76BKPll75-nOsLv_uoh7X-TW0SbNHCYEzkGq8VZ-W8a5tKGUF6rKea4ejr4MhcqJVr1Zu7O-zNAfQWJMsHC9CTkOrPa5Iu1s0KBJDdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "a2868e9e376e356b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoyZfAJB3TroGxzYv0gy7dHUwpU0jtIeU_EyQSFQKzHlHbyRa33r_a-B1aHw5tHaoawuDvv7skisdx10yhmm9ZRrPw0ibwpMkX5Cwhf0OK85DF84jM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzM
jAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "ffb216dc74ccc27e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqBumxgw_FTJlt7J3xLsBDxljAcEyC2G5aQqhEibawMttB-ogPabqDfz8ZN65WtOikIggZQ8OqW8zigV4Q5vfQkog7NMasAuivcMjhAsgqv_6qWv_E" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "183157f40c6c1bcd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrC6xxVQeIIjitNwDv5PhDUIBQbtGdHJDi_AmLs8vTbGzkby-5hguMIwI_UeHlCuGF1u5jBoVwCOgSqJBiJJ5fxh8oCzBn-nHGcGAdIOeQt7gTL0RQ" + ] + }, + "Body": "" + } + }, + { + "ID": "4b6b925053570053", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpiIBwh04pJH1-X9d1oJluF_2ZCJEFLzF4E7SI-easf2noIolxavCbpH7NP9GMmNXnVK_ZJm4kW8coV5ArXDr-Rwodjnd9Bn2hFiJ_puNeWWldJ6mA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was 
not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "9b4f9c448170e89d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHc1VVBdvXFq9aDPAprpXNP5xPt-ZtPdYXwCQPHX_s4gkCMom31AtHeDocV_hker5qryWoYnRD8PPu5c7JqDNQ75v7GgE1vO5xAt7AqWPoLU5MO5A" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "692d41cf9ea91eb2", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpuFB_mdJg9Fd7Sj06Xdcrj7Up2xXVc_P-SgISEEtiv4v9doPXzNbzBz92Q2UfEchYWSQnikHhS34dUAOefj9J4vaBYqjc9RjFmrn8YcPNyS8aWKgI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "db035a3cc51de007", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGb7BqO44vNpZjBnLrSI6_jVCVRFevSqm5eux2DIE2zmUY3sH0T0st7OOsLPcBjvLSbDfijMcdJwsgpM1fQGcerB9bS8Ffd4Qgzdjth0I8FreV4Vs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "e1d95794cb6be989", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVcH2L0r9wGab--fnRuKptLRoUoHCUHfRegh3rKNQExHVsYLnt1NFwOKFa_6hRbeYXO1aY8-F-KiD6IY3sWykIKXr5iqelEJHTWEMQ8wHuzIcWLO8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTUyNjYyMTQiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuMjY1WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1LjI2NVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS4yNjVaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1MjY2MjE0JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTI2NjIxNC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW
0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "297f13e741cceb04", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UroLXQs6945-UOUyN7qOhICUNnGSzCVrNCv5DD9uMyvgD08zpuufBZj8WM8Fxe0DuOuQowqbPEn1UdJdbFTW7puyeY4zX8fM6UgT7SZIVB0Xi-B0OM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTU4NDE5OTciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuODQxWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1Ljg0MVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS44NDFaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1ODQxOTk3JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTg0MTk5Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaW
FtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "9617f04d2dc95a7c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12683" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqeJiJeDLLZgn9vlRyNk1plY-7dcPXaz47gPNAhMqmSx5ZMvg6pqWj18PVUR65UGuWE7Amtua_zqsv0dVyDZE2FFceMeIQcQ2n6noklvRS9kxPXcEU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "2309e069a65c894f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxhvVZmlJMvR93BoI0JsL0TYbDN-kmlYHQOOlRfb-Nnqpc8iUImRtJT8Qvx0XWrOsALmLv4Oub_aeCCWkHPvqaF28oAazlhqRZYoxrPlaQlW0N1Qs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTY2MzcxNjUiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTYuNjM2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE2LjYzNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNi42MzZaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE2NjM3MTY1JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNjYzNzE2NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYw
MTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In19" + } + }, + { + "ID": "d2fe101ea1da654d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqjFigs6aYtqXW_7VucOIgJY2MQZwuLv9B12s2ffhWgWDxtJyjLinG4A8U9gKBCpiTt9jUvdHKtaG20qZfyfN9Q1eVAkUQh_zv7ESbqnWhsiheI7zw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "6620f07aff04b6fd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Etag": [ + "CP/B0t7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpyWMaddvNz0IWnQ6KIl-tv6fJZCvrqTlGuaoHvKqXhxb3O1EKUIEirKSVcaedFZyxtjLhPD7Nj6qf1CRGWG1VnVigo5Wf3sYFDuuLvctxVpoHT0rY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTc0MDY0NjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNzQwNjQ2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNy40MDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTcuNDA2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE3LjQwNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTc0MDY0NjMmYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNQL0IwdDd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7f88847ad9c49799", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Etag": [ + "CKav+N7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urtn69BB0ytbCZpuCbus4w__tiLeBpqeNzD2gKl5KN2LgP9ZJOIjudGKpDOJRhyW_WQjVOCJBC1CSB_AgE0MdPzVDAKWt13VfQ8aKe3pMWh4y1kOlI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTgwMjY2NjIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODAyNjY2MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC4wMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguMDI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4LjAyNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTgwMjY2NjImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLYXYrTjd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a21f3ddca57ed424", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12267" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrE9jrLJ0W1PUHWT3ldYhVsL9nNhAeFPU2K8g2rCMimty07QlN95esxj7dBAySQ-nzvK17L3WA7vjmxjCpeQADa2bztj6ke8NZjb8fuWkQHsnnzKXA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "08fda3fa173e6e11", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CKy2sd/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqfGXVJ2-MQaZWJ-JS6runpSycA3GDKIXYHDGf3VBOXnADbUzL2YhZVmlQcW_95jXY0eMkK4Z5tSRk3cxGP_84T_1t3YDYQS2p37ZouBk7GfX-4xxw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTg5NjE0NTIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODk2MTQ1MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4Ljk2MVoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTg5NjE0NTImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLeTJzZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "0c9e19bd4ec37de2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13123" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGREJ2AZDcrZiJzPX5ltwlB28vRYhZ_xv4LMIO3La2IgFWk4wd5IVrqBs3z2epgtg7wkPpZm4qPH9zTzSIsXNxdLLHcNMMYr2azidefxtJVgXqgDY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "84daee26f7d989ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CLmF4d/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": 
[ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoLuw-z5wwTKy-X92_i7pyTrimolUNRUGrElhep6rUH_mmpgu_Vxp-1ncr8x8XNXHLs8NFRW5S7WVUsaDRRTrC5KBPea4EoFLANK8MFY0FpTkXr7_g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxOTc0MTYyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOS43NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTkuNzQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE5Ljc0MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkxOTc0MTYyNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MTk3NDE2MjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjd
EFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "8457a5701f4ced08", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKF1M6PpAT354ON3809wMCor-B9KITRuEV34MQczP3OD4Co0sJYtNSub2FUzA8qanTDrtANRK0RuxPTz314o3HQro2HLOoyKZBow5CZ4sfsunKHv0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c2ba8a7670e2218", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Etag": [ + "CP+SgODx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoq9hgjjwBbSH1zXN2UlNtWO31Qf5uk4ijjGCQlETB9_5H_CouvU3q4tyHAKEbIBlaW1_mNk3Fsp6LgXqsPPKvFzs3-WHeADAgc2-GpD-0inoYUHMs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDI1MTI2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC4yNTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuMjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjI1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDI1MTI2MyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjAyNTEyNjMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5
kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7e6d2f97bd881584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohQH2nTuRg_FtGhlfr9zC26C6lKFA-aZTf4GbiV5kWZCJ9E4YLHfIUKLpCLIgEfX9TAvi4oPNWfEBpbY-orlescLxrxYcB1U5fLmzzHQR2R0teR-s" + ] + }, + "Body": "" + } + }, + { + "ID": "48e189abff98ea93", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CM3Aq+Dx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq_NscG9s3iq-8NZq8vQhzdc9FJMaAfq_0GborrX84b8gG2zpbUNtEVnrjFjLyOXqBeJwthPgPUPcsGbIcD597LaPlxt-VNaetM2BLtMtYry8pPeQ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDk2MTYxMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjk2MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDk2MTYxMyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjA5NjE2MTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nb
GVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "02d6922c26c06cbd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmjSJHhErkbzi8feJ_-8XOMqsIzGrEe6frQzWXQ7dPl0D8MW1mMHClKdwb2zBOTvpx7mXQWy2STZYSqx2o7O1Tr2DB1RZfwCZ3xp1jgw3NCICi7Xw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "3fee93fca0ef933c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CLfH1eDx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLO91a0iSaTsK7EA2iN621UX-atvJ-8m8BeqeR8_o_CQLB8Pco5FMXznwQY7DZ8u0GPd1BvLE98s5S7T1Z6yL66OKL0CFFUntLBqNAKlji2Dcme6g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMTY1MDYxNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMS42NTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjEuNjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIxLjY1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMTY1MDYxNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjE2NTA2MTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA
1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "5c26cf93373a296a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urx3fvI3sQc5JiIazK1OQw5W12poVdjRyBKxIBM6N98jEJli9F0O33KVHHcHaP-QybHub31QmLNngZpzcVKJq33DSaVMW2Vpi5BUaVAAUL2jMoyOxs" + ] + }, + "Body": "" + } + }, + { + "ID": "f8b5020ef71e4047", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Etag": [ + "CIungeHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ 
+ "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8QEz45d-uPa7qZ6Nl_SMqw2iepQXoM4aAkTx4YCqArXnSKI1UO3ZhJ5-_PDg3Uq6PPa2JDBTbxckiZCRMQvf0Cv6E_8Cwk3jKXGkneWFJYSeMAts" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMjM2NzM3MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMi4zNjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjIuMzY2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIyLjM2NloiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMjM2NzM3MSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjIzNjczNzEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bn
QuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7cfadd8c1e7c0bd8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpjVnPaklKmbO8qV2lapwEIAwPjvf4HSJs3kNHHO_xZH4bV65pVPLKCb2qjMPCYSwEKRZzH6NpYjm6WlxsBdJnfBvpyRpIR017q9iJDyBcVPNUoDn8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "b1381b9995cf35f5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrD1hmfKK3yAwFXKqIwvCsEVMb_e7MWysJc7zbyVjZ_sbD9P41lzc0gYSef9yl8CRa6B0RtPq14V5MBSdy6C0i_cAkgANDN1da3O5QKELvACU11moU" + ] + }, + "Body": "" + } + }, + { + "ID": "5e06ccad06bbd110", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2FRcf4QGZH2KhkP3kATb9eOy4S9lgrA3NQXYqhc1KBZDbcZN5NHF4MB6WiTxOraNE0HhkW3SOg8sp1gS4kAcWRZ2G0EGWmLgL6RUcwr3a2O-WY10" + ] + }, + "Body": "" + } + }, + { + "ID": "b637ccaa36faa55b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGzSAdUK8pPs53JjaSJVQJObMJRsfQ5FIpSL0D4tNfft68cfE8Xb5fjSsAQG_PfmKZyX1Ind-s6dW6FgwREeHzu_OnEeOwCOaT8EZi2N4vNeWLvac" + ] + }, + "Body": "" + } + }, + { + "ID": "15055c361faad5c1", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwmXVRTmXX3P5iId4-EjuA1oviIQD-Vbdr7M5rgd7d5IxNvmPVkZz_zFyMBNoHd_DzXnPMN5R7grAe1yDdOzbyafIBwRcFsLh4r0a7ghp4LkNSLZk" + ] + }, + "Body": "" + } + }, + { + "ID": "307f8a44d4ce6e9f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaHmcEJBKGI7CtW9fK8-j5Z-oRk_dTwMS40wVaoNH1PN7k6x6d_9aD3w3ce6WGZJF5OYxkdoifp6L2vVz5lIGjzcmmIZ4Wxp1vktBOv8hLa417Mp4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "f8df88961018be68", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "121" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "297" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqWiAA6LvnU4rExe2rVTwwFd51SI93o_d2tOj9OY1jk_qe1Yd1eiTXIa2eNecJQsmgwuFr4BUQoWy8ndutrCqGucmVIRL0jA6-i-vaGGVRF8uEe0AM" + ] + }, + "Body": 
"eyJpZCI6IjExIiwidG9waWMiOiIvL3B1YnN1Yi5nb29nbGVhcGlzLmNvbS9wcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvdG9waWNzL2dvLXN0b3JhZ2Utbm90aWZpY2F0aW9uLXRlc3QiLCJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJldGFnIjoiMTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm90aWZpY2F0aW9uQ29uZmlncy8xMSIsImtpbmQiOiJzdG9yYWdlI25vdGlmaWNhdGlvbiJ9" + } + }, + { + "ID": "45f44fa6c3668273", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "340" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFWgB9V-ilyDIBP7KEF3SY7uTg-rikYE1ooEgWYkiXb2cW6oOwwtH1W7oe24WU2A9oyJIGOSskz3_icHqmMn7CfPoZQ-Lu5QdtZzv_5062_-UzAt0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIiwiaXRlbXMiOlt7ImlkIjoiMTEiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCIsInBheWxvYWRfZm9ybWF0IjoiTk9ORSIsImV0YWciOiIxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9ub3RpZmljYXRpb25Db25maWdzLzExIiwia2luZCI6InN0b3JhZ2Ujbm90aWZpY2F0aW9uIn1dfQ==" + } + }, + { + "ID": "08c2546e2c262ee7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs/11?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmXr5TZZPRnABz_XbbZpjwU3iPz2e802RkcmNFr3sLzUx205tdmAb6vVWMNlYMUGZ5tGoddlqQ_oPnz0Xdvpz8CiD2eDLMDyrMbxVFjc3BinwBo0" + ] + }, + "Body": "" + } + }, + { + "ID": "efc8b67f9c2f4b40", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UraMBYT56qFvKLe_OPJfpJRhTy2qSpQyRjP6CScOnnijgaQ88eQtVahktX_Vsvy-G7J11d7GNT64FnpqZrewu1sUmuNNLiEtmkqVFILdNcWed6i1w4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "6dd92f9ce3b15a57", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:25 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpedRCcxIOhXCWdipyV2E0R3z00CUlOK6rPlof1gpKuQbLeJmvMoPFn28o8zqmmeVJ5rbX41bB6Hp116-_ISgEXl4Htmc1VS0Aq41lJQiN_mIvozbY" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "914cb60452456134", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2F\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "12632" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpLE4EijIrGqoHRLrCqmprZtxBU2JtHLLsZ-nIakblnuZX8s6FZA1SEaczyybdjB32loN1-gVCf83PFM4x06S24ATKp9-xRgQ9QNC9fwVH_ec4e9JQ" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF/1475599144579000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF","bucket":"gcp-public-data-landsat","generation":"1475599144579000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:04.545Z","updated":"2016-10-04T16:39:04.545Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:04.545Z","size":"74721736","md5Hash":"835L6B5frB0zCB6s22r2Sw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF?generation=1475599144579000&alt=media","crc32c":"934Brg==","etag":"CLjf35bLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF/1475599310042000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF","bucket":"gcp-public-data-landsat","generation":"1475599310042000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:50.002Z","updated":"2016-10-04T16:41:50.002Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:50.002Z","size":"58681228","md5Hash":"BW623xHg15IhV24mbrL+Aw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF?generation=1475599310042000&alt=media","crc32c":"xzV2fg==","etag":"CJDn0uXLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF/1475599319188000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/
LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF","bucket":"gcp-public-data-landsat","generation":"1475599319188000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:59.149Z","updated":"2016-10-04T16:41:59.149Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:59.149Z","size":"56796439","md5Hash":"FOxiyxJXqAflRT8lFnSdOg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF?generation=1475599319188000&alt=media","crc32c":"p/HFVw==","etag":"CKCEgerLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF/1475599161224000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF","bucket":"gcp-public-data-landsat","generation":"1475599161224000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:21.160Z","updated":"2016-10-04T16:39:21.160Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:21.160Z","size":"77149771","md5Hash":"MP22zjOo2Ns0iY4MTPJRwA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF?generation=1475599161224000&alt=media","crc32c":"rI8YRg==","etag":"CMDW157Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF/1475599178435000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF","bucket":"gcp-public-data-landsat","generation":"1475599178435000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:38.376Z","updated":"2016-10-04T16:39:38.376Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:38.376Z","size":"80293687","md5Hash":"vQMiGeDuBg6cr3XsfIEjoQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF?generation=1475599178435000&alt=media","crc32c":"uZBrnA==","etag":"CLiT8qbLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF/1475599194268000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B4.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF","bucket":"gcp-public-data-landsat","generation":"1475599194268000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:54.211Z","updated":"2016-10-04T16:39:54.211Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:54.211Z","size":"84494375","md5Hash":"FWeVA01ZO0+mA+ERFczuhA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC804403420162
59LGN00_B4.TIF?generation=1475599194268000&alt=media","crc32c":"Wes5oQ==","etag":"CODCuK7Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF/1475599202979000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF","bucket":"gcp-public-data-landsat","generation":"1475599202979000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:02.937Z","updated":"2016-10-04T16:40:02.937Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:02.937Z","size":"89318467","md5Hash":"p4oyKHAGo5Ky3Kg1TK1ZQw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF?generation=1475599202979000&alt=media","crc32c":"pTYuuw==","etag":"CLiZzLLLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF/1475599233481000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF","bucket":"gcp-public-data-landsat","generation":"1475599233481000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:33.349Z","updated":"2016-10-04T16:40:33.349Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:33.349Z","size":"89465767","md5Hash":"2Z72GUOKtlgzT9VRSGYXjA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF?generation=1475599233481000&alt=media","crc32c":"INXHbQ==","etag":"CKjykcHLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF/1475599241055000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF","bucket":"gcp-public-data-landsat","generation":"1475599241055000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:41.021Z","updated":"2016-10-04T16:40:41.021Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:41.021Z","size":"86462614","md5Hash":"8gPNQ7QZoF2CNZZ9Emrlog==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF?generation=1475599241055000&alt=media","crc32c":"uwCD+A==","etag":"CJiW4MTLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF/1475599281338000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF","bucket":"gcp-public-data-landsat","generation":"1475599281338000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:21.300Z","upda
ted":"2016-10-04T16:41:21.300Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:21.300Z","size":"318887774","md5Hash":"y795LrUzBwk2tL6PM01cEA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF?generation=1475599281338000&alt=media","crc32c":"Z3+ZhQ==","etag":"CJDt+tfLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF/1475599291425000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF","bucket":"gcp-public-data-landsat","generation":"1475599291425000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:31.361Z","updated":"2016-10-04T16:41:31.361Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:31.361Z","size":"44308205","md5Hash":"5B41E2DBbY52pYPUGVh95g==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF?generation=1475599291425000&alt=media","crc32c":"a0ODQw==","etag":"COjB4tzLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF/1475599327222000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF","bucket":"gcp-public-data-landsat","generation":"1475599327222000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.159Z","updated":"2016-10-04T16:42:07.159Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.159Z","size":"3354719","md5Hash":"zqigvl5Envmi/GLc8yH51A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF?generation=1475599327222000&alt=media","crc32c":"WOBgKA==","etag":"CPCx6+3Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt/1475599327662000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt","bucket":"gcp-public-data-landsat","generation":"1475599327662000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.618Z","updated":"2016-10-04T16:42:07.618Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.618Z","size":"7903","md5Hash":"el/UdDvWR0hfiElvrbBcUQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt?generation=1475599327662000&alt=media","crc32c":"PWBt8g==","etag":"CLCfhu7Lwc8CEAE="}]}" + } + }, + { + "ID": "3f3f45cd2b718e17", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/noauth", + "Header": { + "Accept-Encoding": [ + "gzip" + ], 
+ "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "247" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsZlmkTyf2_fqITeaIiM8s2MLUvz5qFiP_rzA4Mf6Q9LMxsiQeP-GBRwHON_XvnG2qef3XL1EzgTLA_GxtLKZRjdzOBndJPf9XbJa9KjJCIPlducQ" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+QWNjZXNzRGVuaWVkPC9Db2RlPjxNZXNzYWdlPkFjY2VzcyBkZW5pZWQuPC9NZXNzYWdlPjxEZXRhaWxzPkFub255bW91cyBjYWxsZXIgZG9lcyBub3QgaGF2ZSBzdG9yYWdlLm9iamVjdHMuZ2V0IGFjY2VzcyB0byBnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm9hdXRoLjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "110cdd2daefc6162", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoibm9hdXRoIn0K", + "Yg==" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30405" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrGgfg36ZmqdKbc0NqBivwXULVnF2NuBAD31rJqzC5Rj9xgHFbRnwxJCbdxCuUdlAhGW_-H88mBadNGmqch0fmAyAd80HPANBkUmIgkolsL4HK7XAU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth."}}" + } + }, + { + "ID": "848c7013eb1665b1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqbrgn51bDFbwZ1c2_BxCHhog7If9w6ooKAtb2YCerQcObpiFJZqT3-Jn7zTXEEPVuysmxKw4PmvEOmkbCAJVkmbEVZm6z877JKFhrXTrkWqWYooq8" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "a5f818071a0f22da", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrozB-0-kEtkjITLFZA2uQpw77J_Zc6GErrYrIyxkPTWeUHJNBRLw4JXhyIEFG8szc7bhqblQVKoKYiZ4myOcfL5zIM9KYGIbCyAP9e4sEEdx2pBe0" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "55eeb942e5603431", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=1-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7902" + ], + "Content-Range": [ + "bytes 1-7902/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlAytrsQn43Os8JxSOx4C8v9ApqqtqwEE7kZ-voKAcmcYe32lG7ANHxzNrwkqN8bbLLohoAHd88brZDVaC3U6Q01dhBBoeDFnlkCzHKUJjA8ZWrgM" + ] + }, + "Body": "ROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "320dc27adc377057", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=0-17" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "18" + ], + "Content-Range": [ + "bytes 0-17/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovperPufphUvaf55r54Wd-USAbL2ZQVTseICyulStqI633iJcFBLryyqecsHQcoU2cXp4MsKgB8uQu979IXcnv-aGNm6viDydIrqmPqA7SmPElPGI" + ] + }, + "Body": "R1JPVVAgPSBMMV9NRVRBREFU" + } + }, + { + "ID": "a20a4e3b35dfd271", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq7491k1eCc7fe_JApsP1zDcSyo759KmHvh-9YHm9ekpOGaG8v1bZNPjaMEkikJSDYt_LkVMHrb9HTDx9vvDGy3Zm1kPrlxS4933Sw-Wdh35lDomi4" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "d1941f2e08f3bc52", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + 
"User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYIJ-m0VbRdjw7ZGI_0TiIfj6fcuhJkomWPdQGApFxH2_LnekHIdv7igEpJAM3a-zrTOzR20bzvc-JBunTe_-f_Hsyxz_VPxJNrQY7PSrQ3OMfEhQ" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "2451a87df39e1241", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "W/\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "Warning": [ + "214 UploadServer gunzipped" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Response-Body-Transformations": [ + "gunzipped" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNfvi75DBdUL5KpenQbqbsYr5A8YSQp2lGAPIhe4FlijJP95WctTUrdrLIyq3riprP50HzntCuw7zh5ycZfqbJBkFbibeIwEp5bVDj7yVpJbqctiI" + ] + }, + "Body": "aGVsbG8gd29ybGQ=" + } + }, + { + "ID": "f0e47a86731e8924", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Range": [ + "bytes 1-8/31" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upzmwe6UNntf6SxdYLZTz0aZiFJ7UANU6y7I2YKJbADGSVoCRe63OoxcC4uh-9n2JEnkvQgq0dwCHCbQ3qmYG4cWFEovG_fIMKFx6O11iPQUhLmeFw" + ] + }, + "Body": "iwgAAAAAAAA=" + } + }, + { + "ID": "2a14c414736e344d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgiftU7BDllYqI1jhKj6RbZME3LoakRb653ThaSSD_kzX1O5odabBcDFfG-oswR6YOcDZTW174qxbUaa2G1EvajUt_wJAD3ViGgoMwT_YjoTvObkc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC4wMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "b03cb69547c1a6de", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "99" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiR0VUIl0sIm9yaWdpbiI6WyIqIl0sInJlc3BvbnNlSGVhZGVyIjpbInNvbWUtaGVhZGVyIl19XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urw36CL-_RvF58GoVX1bJlTZ1YUUdWXJKQDIac7eTOYj5s65KYFsk2ndonwA5ro_9nGPfo4qRIYyt7J1Xxb01TiJUQPRXzH-z9T_S5UQE6fxOssN0g" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "735da673f622341d", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urb-FeCpjdYOEj6HdkFXRMfVXffmMB8xjLqLwPvjgs3FghXs7r94UrXEpX0rW_CgEpjG6v6iHCWCkwxib31kFkLPMZMMZxHTy66dPT2AdBDsIE8Y8k" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "97ce23a7211781ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqrjU-mu7dGgY6-CuNd9AwgGuKtQ_O589el9eyWSAb54cDYnQN74dl8HxRVzIUayStgIinEuuux2afAlvkUGm9lhrefX8nugBXNL2lGluNcfQKfRo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOS43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "a8f5b1c42d7a3c5f", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "12" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ4q-BeojZ4WgF5ZHNzJG7sIvdOxmprH5GqoQ3DM-QllSzHWOwAfhVdJZH7WmVK5Dfs1TQH-pKgneNLb_9uYM-rDFb_5Hh9vs89o4pOPT9ob9ezTE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2
OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "cab9472f01ed4bd5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo0__TFFuzUsEi6Eck0G1lblGy_1abmyPHERWBavjN2GHncyWWnKQ6yYaFDpDVQ-kfBS15M6H9NoxXUDS0hZ9VX8S9hK4rUGAAFWXznHjOntSK5ldY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "7a196e6b35872d48", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpJhdZl7X4YXxzKdcr7SJ3C6Xadtlo0ixoS8DNO1jaDHs5MeGKu9nx2pHWPpnUWd13HZ8inxG0pMh3un84TucDfyLu4Z-5Gp7Ka3HBfPnt_w_VTiuE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "24b6a3b712ccab17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqe_Q9NPVf3WTO_It8-A42wh0FD3KyF4kopi6posCEX6l28cnowG8O7WSYwETklwvpp0Uk1fCXiVmgwqLlWCSc3ez-nYllKp5m_UVtxup1E1UNadGg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ebed90fad884e1cd", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrjLjhrtR5T2TdsRM6X3HPy_6vnZjhUfpjytCivwknDLKhiMkND-f1qeZ4te6AnGrv_qtnGI4OGajQJaydHJvQZGWpnBQN9VOXHqYjSBTCxnNbcbC4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "31ee7f558375c66a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up70C3kZ6tTqgyRHbTyjXA5pLWMohouKPHCPhexU8UuWw0cAmLxjHcwaa2xBT-Soq-CLyVxoWeEkxFMhRqqK87xaM38Q2hSjGkeXEhmhoZ3iy5s6D8" + ] + }, + "Body": "" + } + }, + { + "ID": "72458186a1839a5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsiYUGF50kaeS-bMsAR51mlDVCwH9HwmqU1hCFSwzrMDgCmGF46ZuQGfqWx4SJprlBWIxaNwT_EQMKQrTQARvFyee5ZOJq21xZUsVEc1iBW0KkiVk" + ] + }, + 
"Body": "" + } + }, + { + "ID": "20fcdf5cb9502a64", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8ALydoTkq1rLUaKiwrh6YEhczTwI3VX8iKuCNoqr6N2BSBvtxD2Qc9De99Svk_4EX82rynlWctsS2F9Ffr4WuDby30_W1Fl7TMWFZGWfo5Wc_nfI" + ] + }, + "Body": "" + } + }, + { + "ID": "ace99589753f3389", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotVHCMZG4hcbTx20gUPBV1LsUxIAVuDym9d21c2In0VVLzBxOM_DAULuLEd9LTMmpNg6A7yW4yqEsnRlHhAaMbpe7MwAWbadF2gJJ2M7uGdA-rcLQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "466aeae01d2d2933", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVuGt1j5ranQuT0nS5bzOIcVM5M42RwIHcF2mKK_H1ctJEqC_djWDMm00OBFXY4yxsTFdQ1ZYjWVnFq8GhUAWUsvsj4_B9Ur8MzJQcbAzG9vHgIjs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "34d8505e483be050", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZWZhdWx0RXZlbnRCYXNlZEhvbGQiOnRydWV9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo5A5V7O5JcpElASqEqUZ_UiEjPa-39PCu3SwJk8EE9cHfyeVL5DWQkAss8k9iRx7YjD4ZC7WnzE_ENz95PuKdccjgzeC2GLFAUzA-sR2iEf-lNil4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "728568d994e275c7", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + 
"gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrOVH8W8OREkTrF0UKlK3dOp_AAbRF7Rsx16Zbjdl5caBObFjXETVORm7o-d-YYcPIjcrx1mKcQwxHCIXpdE5P6baH6WeY3Wc3JVF7UcQnKna-a_U4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ
vd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b7230d8a6b23fd8b", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "35" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uouwpmid1Y6D0uXWBb_WrmlZVHKYnw0RGbreddchrmPzQUE1H_DeclZnbl6Yb2346L4YNt44ZeZWEM2u46h7Zx45lw9rUFxZDQIvlXRn4vT6fW4WQQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ec9da9e78b750503", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq25oUSnJMHsIEARHohSGtkz5vw7RCXuQupRBliEjCXMeivgUIK0y9C3U4yWEJ-_182SvGUQLvo5rXxymcoyahwAEhfe21FJU_M2IijdiWsSv83eow" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ad49d37f07d631a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq02fbr_iko_zBCbJljbMwyW7mOaz1cqR6AWHm2ac3m9BcqrUXNkmRqxk0R2rZW6SAUC_KzbOze-1wUGY2E0g6wbYLUqcb8IOdqN2-ROvYO49uxzjE" + ] + }, + "Body": "" + } + }, + { + "ID": "7b04c14c4e323488", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrJrrmhbKyKRTvA3_mNqzRsaaMOAAlvn8UMwBiOR77-6X6m5InqoXMb4qoollPzgpcFOmchQ54jFNic0xJf-tiGu0tlrzSdbjh5Hzw8Rl3RnJy_jPw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM2LjUwMVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNi41MDFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "ae3e9c648aaff158", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJuYW1lIjoic29tZS1vYmoifQo=", + "X7Xb+/Xtxt2fLTn7y+yBvw==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrJv7oOcHCzf9atnIPo70j3xSldrZtXTb5CmV1iuM4mMPU4hiBgpDqCAdUDI3i9hlR2qRie9qT30AJNshzO5CSsuzoTQ88W255VO8VVumaCSZ0KXZs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "e7a12fdbeb636933", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo-mQ6Bnw1qbAyHbMdumiv5YlTBrFQbHo-9tXn02VufGPTp3_Q8nXXp9VIcdRmzH2CJbhaVLu5fUFYZ6VKgcitBwcE-dCwxWrzaljgyT6zmM7x01ls" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "142eb6c42bc6f90e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrBTG1gAXfMDmjm_clsDlv04E6VRtZUsUKnaVr4bC3AIRXILH2f0UebFf1_SmXPSf6Go8sw7TXti9yO112PAG5QNin3LwaSjRuFDSH_unnMZJwRceM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "12487e4f20538041", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqxyG_OCKjwh7TR9zL9nbOGPP1ttxni9cP68b18nTZ7wZ8WqVE0MAGh1XFphNYZEYr-2kdgNgmqQrOvFiNbg6hJi-RfjCWYg0KHPLKfzyWc-gejszc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "e3a625bb7418cea9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVf0XeyD5sC9KvHEILX2e7x7VyJ9d9xoyrP3NqGFlsMgvK3zgeOEOXDvSpftUSy4opndA74-LNRkmAi8Q_6wc91iC8OWV7X3VR8kENWwclNK30V_U" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "2290ac6bb34b705b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKagfJuJDLVLgIcrGtlV_UXlXLSqfEFj2-knllMBWWLktTv22ZfKoeSo8lY6gSfE1LKlvn87WiyJv75QWpcWdXzvablAMBOAZ_XMe-1D8dKZ5ipHs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "572d50dc9b5ce8a4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6ZmFsc2V9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3194" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrWVb2L8OGfosYWTjk_F-zmyNwltYI93f2tqyP4EkOx7hIRpsSG2iGDFXB7SP7DKn-teczhb9tiBKRM7rYG5vcJ5jVinideFoX44khld5BhtRaQsUc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC45MTlaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSIsImV2ZW50QmFzZWRIb2xkIjpmYWxzZX0=" + } + }, + { + "ID": "bda340f0117c1fb4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotG32AgmOwDD_bLLd_W64JCOZQ2-UgeY_cOZwomlkWb6Vx5JSE54tdsuCPvawuyJ8eNuCf8EM0qXlIjtAyg6QQiuN9t72vHrQLsrSs6WtJBDGZziU" + ] + }, + "Body": "" + } + }, + { + "ID": "9a0b7379af024a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7S22mcdvPtDq_Z9iUbo4v0ApOQXNFgXFNZuacd8yZUq2FhZKco01Z2T4vJSv8s1yZkGXzoUrAuUKnCBx0vZuE-MDGnmTF_YxhQoXobyvAyktswdE" + ] + }, + "Body": "" + } + }, + { + "ID": "affb380cf86f173c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:40 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrF4q2mbuFoVeV9q22libc-FjfB1eDXSMYhNGZNU6sbaMWEKdc-8eXu1gtMLC7Ia9bSRU0oxgpIRR9516KZRcg7o_VGdrU1lliVPuSY9rYa4OsJymE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjA4MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC4wODFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "88123486ecd710ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJuYW1lIjoic29tZS1vYmoifQo=", + "cGCusp668ZMwY59j94Srfg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrixhGvSHn6UzWmshHjh8Ny2NE4LW-gcPPW4dCm9dZTDfBwtENYMJra88jZlphQFcyabnQwTegZW9_6bL8KYNOsyYQYgdtEDvFFMQeJ9B_IXQbrsgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "042e51e8b073dc93", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTH44ERv_v2A5OmhnLVBFOkQ_bPdeDI-MNir5xjrZt1ybrczlk9GwseHZ-kt566XrMnuYGTaCkr4ImF__dpbt98CqMpmYkiJaR1vKcI1DC7o3Dbl8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ed898d656784527a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "83" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urh-jh9w3q8AC6TmG1NF_6JgV-Js74mV6EkB4ccrlZ1ZCQ9TlA4AhJ-J6rkqtJ9fKiy4YjT_l-TN6dQhnCwZvTUqoaJC-W-z6QQZywuQ1uX9a90PhU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9ddea19a0c44c449", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoZVnUhVRVPu1unMjxNHQzZrlqTsVtPMZHc_YPTkvy18AAD3d_TOg9qTNAEv5NXUC5n0XF1sPedifaWR19a5w7dCMltRYfuJUfGxaMzyrvpqKbGjx4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9b6c4955472bdb51", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoybPBMseeVzkBPlzTObhT-eKnJ9TbsC15btCmXKMDvqgD5Uz4JclrgL9lBqMq09UjP8GL2_3zCDkfA0gJ8SHCDZbHEi1XYcuAUT7hmXazBL3IIMqc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "0227238a512229dc", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6PS1r6NaUKQ9158DD3cKWHqyParXLdcW8k7sToMskeTU6BjpajWENp3qNHqLMFvX9NrK78TIw31h_ANYkltj3vAQoYq2RG5C6ZB2LCvUIch96SMo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "7a81946424736215", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjpmYWxzZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1klBHpEl6qb-hfnVYr2KJLmo_3_1fgqlMYMBpBt23b-U9pF7bPcNEiJaMtrGEhZPQb9-QnqHiU8qPfmpNpSdy9KR41FwZm-89gyu3EtE5gq3J9-0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi43MjFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSIsInRlbXBvcmFyeUhvbGQiOmZhbHNlfQ==" + } + }, + { + "ID": "7865e37b40b9ec66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2nfd81k4cBaY8ZFRGIu8G6DHyGUVAdIJCB65E8ZqqO9ADJIV4K22sQaOA8fqGL3jyi3tIszjBVsXHOFrCgca1vLXSpA1-3s2cn-MVhTKpzZ5tzY4" + ] + }, + "Body": "" + } + }, + { + "ID": "fd9104ab049c0173", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMqIYvjYuUxhnSLyoY-p5BYUiuZIX7cs418asOvABObfrQc8eXZpo-V4LSZVWWgt4CNoviMvFwcdC9sCTKCZU1H-9Ifcuf8lptTMZpiWMJjZfD-DY" + ] + }, + "Body": "" + } + }, + { + "ID": "e735515a80a67931", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "105" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjM2MDAifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "573" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqxx-CzGd9qn-QCqeE_iYOkeO93A0IpWx1voocTxhTrdkkBJonYBIWXeaEq3g-LNThw7j85IKVotWyBNZlHzqYSgBchb0DeXXetMqS8WBkY603Sa60" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0My44MDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "94bfaea89e1f47c2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJuYW1lIjoic29tZS1vYmoifQo=", + "29UJUEl2/QvM9FnDBFnPRA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLSIzBtq8Qn_3f51_hlfoIkcpamafGwB3U8er3K_mzANwxXyscBHWZhaxb2-3M2wOT6GZER6-zLAtdeeSNtSlUMHkHX77gocrZGYj103HA-AGjVSM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "7db6d4640ce21307", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUK3qzYL0xUcMKfWD6mmDZqRfnd8q7O14gBrKng7OYalNXZR3ZegpD1VyqjzW6bgQqFvLrtFjWXwPoJ4E78_nThYByR2n3Hm33ms2fvCdaRSOy1Qc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "cec800ec28205dab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAm9jtf7nMSv2Bbn_pFGaLCmlq2CtqUdGeEOyYrkllqiADBk9_xsJS3BufmzfGIdARRHN7jVwbeHKev2CHMyzWoGA8UxNGNIldPCC4oZ1dMPARKHk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NS4yMjZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "c787f6d5b23123a3", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgP5ghUeBxvXks4XlillwzLwfwyRGxt5y-FImGHSwZ-Meht6M0-keE0YTUG2Qy5hH5i1gpWPyGHXweFDyrk8a3QbDPnG5shDEoxzNOlMEsX6V8-4I" + ] + }, + "Body": "" + } + }, + { + "ID": "144726e64e401a23", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxuxvNJ3APW11H4y-qweAzn3ChIZ0eRjSetoSHkGwVvO7nMR17h5Hbj8I8zgOGjR7HPA201lDCw47ejD6kj1wCz3ZTIrUz-r-qjdm0vGGszQYFidQ" + ] + }, + "Body": "" + } + }, + { + "ID": "eebfbd37f51a5cc6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2Uox0foiLUu9KuEF-KVU8lIN_yUKAJsYEuwVqgSXvzFRGmzJtSZvFeNvMDS8W0aQ7c8YoI89qS_MxU8zBSBqcljYpIZe1NPIvkEFEG5K22RJkAuwmh8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "c7ddc04b5d29113a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpD4zsQLTflaR7g-z0FszOHvw5oXvxqdiyjlAnMwMcBcUEjidrpIGgpvmrz1EWi2AyWkdDCSY0l8ItQxYnWwSZt24jOxXBlzq-gCNrCCE6j0fJVGKw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e08a38dca4dda51f", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrsphQdkHT0ctCFLyPV1JRkq7cyQ7w1tvYmAWv-6tYsuThaMN-vkUJoTNz2gPKGy6jV0yqdbXaAlRlD_5n6GiQa-ULOyKLi-C0q8y25lfk7L6zROE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "fe586a68284be237", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAjkPxULicnoY5wPnfrg-D7yzTbTzTZcUShEN8JSsqVMN758lI4DlkvFibNK9URhYVBH0xclTheAOO_CEDoVb5egvcF-WJqJ3SDisU2YPaZsohRro" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "732c3421adfb99d8", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": 
[ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohfaTh8JFEjT7TTMwV7NkrcAsgkdpDyovvbjHJLNbXalzOWX3_HwhowBimaqZXhkuvBx1D38zsUZ5dEzigoyoXRJtgHfy7ClUrshBL_yMfPkZ4JE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMt
NDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "aebe46d0a31a6e95", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uop7c4AxllIcwFmLfqhIIaGP3Q9uHfmQoAm9LttwjQIvzHiJvu4fg_Q3UfNP8YbDuP97jLsG78JbCK68eSQv8ZHPp8kpzfdJcvOER0cs75wQ9gC7NA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "bea48b3667650bdd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1p1nEuvoUAqYHYa8wuU6qF64gN2JPCxLqqFEUiKXLgo9rwPH_btm63155v5hbuS6sow8cYEvsXVJthsJ1Zn8qfb05USodp1EvTTrt_m1rpJnIxdo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "a951cae4048b1267", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZAHhwxqSSY5MYFK1fdLLirRH41rc7W2YWSnn4CQZkbdR_98fRYjddtqZVfpEizqsZ1BNa5A7ENxQz7BwQSamASc8XAC5gyZp_pwi-Yu8l0XLSCpU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "e7f0506408261db3", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotOOFJlGACBcheGxpS8qxLlGsDT0xOhT_Cg1M_6DxUI16IiOu-YHaPvwzxilOKOrTvL8kRzcVUiNuFeS6rQExTmdK70I1IOH2d2BLOrMm1L5Int1g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "b2c4589e9b9b5ad2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:50 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFdLKiYfqS-J3PXjvHGxpXtJwVnUB7Li8XrR-DTYkmyed-ZlUmUcmZFgGyIayuACtUWVYBcRbhYOKvgkfl8srE0OdJbc15OHWBttYqenraJ0MdXj8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "836baafb4e98da80", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpNuySlLo4SanUdFJK3SGoCidNgWEW4mUuFDGMLLSzY-UUA3TJtN5_FLoLq7et7d1YSUDlOoGZY4a_GdewGQtsUwGdbHLz-bopObpzPy160Wm3UMpg" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "281ffb73e66efd1f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uo8PK9JysFiApLjDSbbo9GloKqKjLO8vr85RWGkeC_Q0iEQf7zu2mwQVeiuJ81pfdJCUja0HrhNG13sczI2XoflqlsWNFvdxHGjCBrvDObr-s6Y32U" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "515ecb8616d48a7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + 
], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqF93J_Bpwg-_yefVpKWteSyWBSx9FqNm8MP5eZKqVfvDWpSF6EPZsISss7hNOkKMM5OuV8stlJIW2UAkzsSgkYOHx5fCwOwylbtqAyb8OlYJCHz6Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "771e55d617425f26", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6Slob_akPo2UWNmzCe-LD4cb-9DVTq_FPlNxA7mKVF6fBFks6gFnZ83BSajEU2qD8g14Y-pX20kbfoKf8XGR9pIW2xXH_oHCxsjspHZ2ARGePTMA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "9526aab2be8b214c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UopNhdzHbHv4sXyk0r1owz7bkkPQwYIGbb3gRingbq7vgESKltWLIr9PDDIUNREfumEvZDfpxMineWYNzZ0qdOaI1QNYqXjnuxO6sCUDjBYEhr76uk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "0aabff8aa1cc1e9c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTj6zYv1NMHDI_ktA1d-p7HKq4YnJMxxI9nXubPfCMyIspOgya8HYibmvOMFwLtCPagnzRHe0HbmTCSqJpYMay0xYZstGSE_IUWRqI9lUhzq0Iqe4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "05147b5b5fdd7882", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + 
"Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoshhz3RuOTlNgZ3gOLis_dSFBsqr7khyJCdWUD8BGk0_j-AHxA5etttVHuHTrUcDksF4LdtfJLhlVrAJe9PrGiYZ-Wy57j6tb7L6vc0qHoTvolIRg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "7de8d497e7e1a76c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3mjbvLxwRDDhjg8GTGBNcJM7EraDydPsmD77oQcyYYGVlQnUqg0NQbezf4KQOoOHoEnC9jMR052e0olW3qi9KdAi4-qJEiKGI1rGivb5hnJnmU5o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "3876040740117531", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGdmh-XzrU5d5VwRugwWr452hENxpWf1sEtNCDaGksLJ1taCgnEjL_0oAlELqirqyXLlw-frE8LYo1V37Pk1G3qvWGk9E1iSZV7uECOjsdE3JagK8" + ] + }, + "Body": "" + } + }, + { + "ID": "39fb456e528c7081", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKcAQsTdqHfiXj71c5kivJTwCNhY6xUQpZtaxywsmr1kNG5_MdJBsOvuLVbDSHKRIsG9tDxk_9Gz-7zaqP_pqke1aqKOFg9NeCU4ZqfjD0SBy0Q5Q" + ] + }, + "Body": "" + } + }, + { + "ID": "3fdacef865a6fc39", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur0PIkQTUvOl8-6tC08mwiS3nBPydo1OYUeD6Q46ZrU6VkmpRwxc9MikGqJlsejuGZkyY_aaxUVKYGREjOF363tfkbII3of6pGFuQ8rINOf8uFV5ts" + ] + }, + "Body": "" + } + }, + { + "ID": "2da48fa864dd9efe", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWkbW_2n8KvRkdk7Lh_5eOVBjW6JvoW0i9LNQUHM-Df2H1QIbUYIwFnUGLt-bPfFa46Ur-v8Q76mU24KpjW7WIC_HVq4Q5H96pRYk7I6ggUsckTs0" + ] + }, + "Body": "" + } + }, + { + "ID": "3ef26f9548e0aec4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqZfOfnhYy6nk2TxHz3h1eMu_0KlczCaOb-xcAIBRoRgxk7Q67BqsT5fvb-vAky2phYzQWNHLTk-0T8V2tpPY4Hg23Ub9evuo6_YEndEVdiBRhTcFI" + ] + }, + "Body": "" + } + }, + { + "ID": "8f3abbeca307cc5a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqdSQK1TuFGAgs143LV6RLNjzfrsAl5vt-Jb_RQ4dcVGDzK8I29sKKrPmcY0rLIJuVIQar3mdRGhNLhqXp2xCKli28TFq4rtsIgiA4DxCtdVbajIhY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "bb92a1c4ecd9e884", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "aGVsbG8gd29ybGQ=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3315" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "COup9/Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRoCa6t1nTWVUrdfIXfyxZBI6PKBkzO24svoJRDEYMEsduIrzBpdDiX8mIUWSxcL8-pSRZgcDvy9_iHKpoVbiUNt4QhlG7zosGpa3WOk--YlBxjeM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk1OTk1MjYxOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS45NTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NTkuOTUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5Ljk1MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoiWHJZN3UrQWU3dENUeXlLN2oxck53dz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk1OTk1MjYxOSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvc29tZS1vYmplY3QvMTU1NjgzNTk1OTk1MjYxOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InlaUmxxZz09IiwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0iLCJyZXRlbnRpb25FeHBpcmF0aW9uVGltZSI6IjIwMTktMDUtMDNUMjM6MjU6NTkuOTUyWiJ9" + } + }, + { + "ID": "8f2ac73db961f1d6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13884" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok63dRFSkGv0Fum7yPhHf_AtYTWceX5_zkck648-jhWkyym1ezg2Og-LqKJ6nnZCW_gPZuU01OkZErKAQbL7mvQroCq8nEtNTEAosMxk9fJRKpYlc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: 
com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, unnamedArguments=[]}, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, reason=forbidden, rpcCode=403} Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00"}}" + } + }, + { + "ID": "5a7a0988789dfb17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + 
], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZIBPbBTYIUYYaBFrcXzNnp7uuC8DoPry45bX57PyfL7aM5a83NLxYnvWsXD62w3LrRVksNhJbrLnSL5e8ZS6b0Cto8ufh-V00gNrGI-CaTc4achc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMC43NTZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQ
U5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "8410bda61aa0b6fd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2MOVFr2ZwvkwYm32qtqndRBhJnBc28nVBEBowKjIIxpOxiYcSef4wtz9sLTJ23PbXr1F8L_TfGwyShv5j1eTsQ-91OXU8Q8JEZtui9VVyK96j2D4" + ] + }, + "Body": "" + } + }, + { + "ID": "7b024f33e16d850f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpDkcsqcqPLSySgf13o9jWNafbKdVjSRqGNu3ZedvF7TlL9TBswdMMpcl0zQOHtvXWIrhrcZODr7RjRhHuvS8gJdMbnYeQDHrBCbiH6x9WrnaZ5uIQ" + ] + }, + "Body": "" + } + }, + { + "ID": "33b0efc1b91cfb42", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": 
[ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpItvsNld0b1FGsjQgaVCDxW22UEra8vRGfkWtbDS7R2kMbHRPr0I6iPjzaQBOaa9-xUJxGO-JV6RhMlWwH2NY-FXY2-0lrkZiAzIL-tdFwkCwlF-Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "632b9a0387d9cff1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2520" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEVtrO8ed8TEHdpzyw857ZykloGKTT56atylmGnu682Y0VxI_T6SWN0JLFupJ_2Cu80ntiZttFSCBqwqFGfxVrRqsNnf0KBDyzqOH8eI-BaWPBXvE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "a32a5924f876577b", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "639" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsYL8RxAKClLw1KRdSSIORQHYKKf_HMml2vtwDOFrh0kTYDzhGEgX2RJAPQKOP2-70q7ZZ2VfkkGz12tmTiRgSuwB2dM3xxZxZucASROVeoit9Shg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjY6MDEuOTA4WiIsImlzTG9ja2VkIjp0cnVlfSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "718775f2f8320cfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2536" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprHupyIuGJCYeUCPKigfkARZRkCc2Jz051KFrXK7YS0FbdKxNe3r20jon1ejkooHsAtcXQOi1tDV41Ob9dJ3Zoyj7stDImJ0eQriMO0WLLQ__-V54" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJpc0xvY2tlZCI6dHJ1ZX0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "2f4b1104fa94954b", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13774" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrafLpm39Fg6hBYYi9lSEizIWUdMduEDQykOeT9Bch6uf9BNQUO9sOAnWpTklSpNac1yW_kSNj-JR8LMmVLirC2kqG2DcQdE-Hg5dtVBkVZ4Xhjlc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 19 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., unnamedArguments=[]}, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., reason=forbidden, rpcCode=403} Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'."}}" + } + }, + { + "ID": "a8964565736a4805", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UrNbiNCNtu8DhS-gNXfoe4R9DFKEVzknZ0g6YoI0JdvwH6P199oVJ2oaJwh9WXMte6frhDr_vJo1_PlxLhezMnUTJzMYZgPlPWdU1QN9imk3hXgawE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "49648eb4e4e2ddda", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UplUHmGCg4wOKtpKVz2KH0cn-th8s-KXQR62bOkAF1pV1zIZJvsoI_tURcFttkyHks8g05Hln5fT4Ej_O-Lx6JCTovYYwhhHIK7AAcialHMj5fPHk4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "42cf12714cf13289", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrcjWbo0pc_i_GQTU2WE6YR0XhWYMGRPJutu1MQXo6XbRKJAYhzNK4p2-_gCqZ6BvNGncUP1UZNoTrXsFFGt1RRTRR3DS9XNiws0gt9YQdp_I8dqqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "4f0fdb3eb607f33b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Up0EsbUXQesAgz95sFt6OzWRDj5WAXqEpYaqHgGvCClYnBGvjX8eQmor80UQOCIDzT6v9ZAtOPVhFIqkJG6rV0-CDXiLAnr4gwDrPkqgezvm8PQTaY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "a5a8b8af13be21cf", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:15 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" 
+ ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpF7Lqxvm1dNC6q_v81ZB1Ui9c2ZLJ3GXLX6Y6UNZN1nbkF78c0J3NwXavHF1cMFZ5oP8ufCnmCLg9vQHM6tfs2ih8YxQSfE4D3AxGsvBaGNP3VUMo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE1LjA5N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "e1dbaeed8c6d1ffb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=0\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12155" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4dRXLSTXYf9VXr3eOCMfuLBwDLvNQMXwIYjwEuSFQO_p-SxZQXQJ7I_VA0T5Hn8gnnEZIqLV3vLCnurXr0cyHDEMpCdxWOiIiqsXlgkbcLKf78to" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "814dffcceacb3bd8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026kmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upis-8CwJYvr7glFAMnec_3T3YDojfTz3O_uICK5tQQmfJF2_rFtNnbqopcxBL5L3aKyoYe4zOLoqN4_MFFrhYKDm6Tmrl3rytuh0ymlCV15VZKyrQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI
29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "9b05110f2b59351b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "\"-CMGB+Prx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:16 GMT" + ], + "X-Goog-Generation": [ + "1556835976741057" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ6aSiGPoeZKANPoMPgXhL1QD5nbILvzv8v3sOMId95Y65wOPssqesNA1MXzp86JA4Qa5gAMxxx5cnY4z3-tCUnvsKoIHn3NXOK2U6ZSRLzYNeQeU" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "011b81453e7060ac", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq5t8aexqBqqxnLJFK9mWmGdVrFfd3gaCGOKfGJMO1phB3ATnxt67-cLVpELRy1gNnjNOK7-zMzWCR6yV1v5PQcv3lccINA--Q92RoECpvBON-QKtc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvc
HJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "0dbd0a37c735be56", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur_RD6un7NLXQpzwRBFEfZdp982qhfEzyvw8f_P1Lx1wKv8sUid99lQ7Nba5Rs73IJfv3qHXjQfUDHjWclvCAcS91nMboiexn-FwiMrUuKqCQ_d__4" + ] + }, + "Body": "" + } + }, + { + "ID": "1b81fc72e201f2a2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + 
"eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY3NlayJ9Cg==", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CKCxsfvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRTb3RtqG1gIL-ENbVZC8nWKRHapQP78uVzv4yyL2t7N8cA5jEBWa014iJALicWO06KthvAMPLctgZCJ2UvPqo72IO4_LheEfTr13-_h43M-roUyc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jc2VrLzE1NTY4MzU5Nzc2ODEwNTYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrIiwibmFtZSI6ImNzZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNy42ODBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTcuNjgwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE3LjY4MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlaz9nZW5lcmF0aW9uPTE1NTY4MzU5Nzc2ODEwNTYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NzZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1
iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NzZWsvMTU1NjgzNTk3NzY4MTA1Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJJbzRsbk9QVStFVGhPMFgwbnE3bU5FWEIxcld4WnNCSTRMMzdwQm15ZkRjPSJ9fQ==" + } + }, + { + "ID": "6da27052d0dabd22", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026destinationKmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3372" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKmOV156VWN4edUkrNip1urAj8WTpDcgGTS520rVGilFT71KrZLZQxUMKloxtxKx5R6V9pUvIqYym68P8lQ8RSc5rEE_D1KlH-NyPgybiPERbxYZ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiOSIsIm9iamVjdFNpemUiOiI5IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21layIsIm5hbWUiOiJjbWVrIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwic2l6ZSI6IjkiLCJtZDVIYXNoIjoiQUFQUVM0NlRybk1ZbnFpS0FiYWd0UT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWs/Z2VuZXJhdGlvbj0xNTU2ODM1OTc4MTI2NDc3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN
0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiVUk3ODVBPT0iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSIsImttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MS9jcnlwdG9LZXlWZXJzaW9ucy8xIn19" + } + }, + { + "ID": "6ff8a4be1ed6bda3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/cmek", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "\"-CI3JzPvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:18 GMT" + ], + "X-Goog-Generation": [ + "1556835978126477" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDR7RHLf_CQraIBM5Z3vCUxSJndE0E6FBOLFBqmYvMntnrQyVIfxwuTE6BtHL4b8oQszU6rIycc72JMUjiGRM_XoGdjsk-WhsbBmL_3c3x5H1s4bs" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "51f1995364e644ed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3271" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "CI3JzPvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM1hbkOXC-oSxvoqjP8s2MpxREJEpzVp2TTYj-6B8467MLYSA-yrWs9UoGwRGQUCJTR76e-qAmQnbzGsFGT31Kc3qjevb8SM0MynLHL9GhOMkm72w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrIiwibmFtZSI6ImNtZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21laz9nZW5lcmF0aW9uPTE1NTY4MzU5NzgxMjY0NzcmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwM
i04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDSTNKelB2eC9lRUNFQUU9Iiwia21zS2V5TmFtZSI6InByb2plY3RzL2Rla2xlcmstc2FuZGJveC9sb2NhdGlvbnMvZ2xvYmFsL2tleVJpbmdzL2dvLWludGVncmF0aW9uLXRlc3QvY3J5cHRvS2V5cy9rZXkxL2NyeXB0b0tleVZlcnNpb25zLzEifQ==" + } + }, + { + "ID": "8972a840dc3d95a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmNCzrFh_7cFaryet35nLbKqM6dyo7nvsyZApHIj6fLKcMDb9ZA-_TRNjGOLNFjjoD0IZ1YOz9P-ftNchBFkjWDTaFzBjbHmryCc9JInU3wd_DLw" + ] + }, + "Body": "" + } + }, + { + "ID": "1e28b278fe3e0a66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ 
+ "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrX5OILXadm6r5e8fLyuLEhQncM5jrmmDaOesSqraquvQbXXKhTqNJ63RYU3qj90M3FLjwnAOz8oOBTQQDOq2sRqUkbpBbJzYj82Rp3Rd9VLpNns0" + ] + }, + "Body": "" + } + }, + { + "ID": "d1ac5d123ee4bae6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "200" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwibG9jYXRpb24iOiJVUyIsIm5hbWUiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "609" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpbajYTiCBBelYuwcqOs7_O-SyJbcV6WGwYhC4JCMmeImLlFLb93JyaiYrSp-201iYZTIdQ2VViFR_Emxe2Z7b4jV_G-0o2xzkqnkSnz0-Qcw_q0Wc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImVuY3J5cHRpb24iOnsiZGVmYXVsdEttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MSJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "d035cdb991d0fabb", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoi5SKd4TdRXg-INW-U3pKcPA2ioBWIc2zdC5W3J4efS9iZ0EKFNqprjQ-NGEK_3wSDGezk4Al3Dd4JovGxXSnH73rIw22S_R__XRrpTQmYpTiNFGQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3
RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "672cb2bcc1ed4ee2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upyst1aH69iaN9J4055oX7H2yReN5TADIX11Vul8KakNuYZbJ7yXm1GBdvCDVB-IPTvbhzELJWgfurnIHxGpzKJszLO70n58xMnVbSLsHX1J3W02Lw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbG
Vyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "c037ecafa6249395", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0019/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "\"-CLHMt/zx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Generation": [ + "1556835979879985" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrQOFSgCkxAaKVPAQhz0ohMN6irPryraHLjWDEQgV3dNEwc5cxqcDDRChzx6_HR1Z_NPlm3vmjFCJcNK-2rAz-tmvVS8w36Q5U9Lc2wAGUgL_joX4" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "fc3eda648104f042", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHPSbutJRXX-6i05QnH_4UizrPft0fsGc3_ISDNmXQ4C3RPbEAnNfS1Ren0j3k2z8zxk9AV1WZQCWiKG9D5A8gZAZESFxbSZyjbBH8jkLEsEX54NQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "13283bc1ba20f019", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2b3OtHWqpsRWxurpcGgrEtM10ZDdsrIa2z0nFsO-P4G3RAGYMTVMR0MCtwLDePrKyhfnsK-d9T5AIcRpeaIe8Z40Qm7gkZP4Fdi9jXd6ceYe5Iwc" + ] + }, + "Body": "" + } + }, + { + "ID": "ca2255586aa4aeab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "126" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur05CMGbsLFprbwWYFSeE3I5dKFdFUzO--ImyAzkLt6ueNGpMDviyX6c1S4F6t1bqXTSnyQXXQzgugu8JoecrL3-1eJOPsqnN-OBMubntJCblkyWqE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9i
YWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e1c336910a43425f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcwbg5Z9C0rFvSTWYj4FxF27ooV0028Cm-yme-BrfmvK0dWsRkfRQEMWylWwokcmM2kIvLKEiucJlvDLHvnbJPB_HjH3zF2Lm8xCwV_9gPN42QDeo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y
2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "581a002b653f595d", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "20" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjpudWxsfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upv2LawTqc_eDVGR_hv2jSCyEA77H33uMkPevVxUUDKj1u4a9IWOH2f1oMXpU52-49Jg17SUSoJUJai1Ueff3ahN58GC9FCxwJt5v5xOHI3SytFjpo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMi4xMjRaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBTT0ifQ==" + } + }, + { + "ID": "a0db97c641f3f17c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo15eoN1OrQFM6eqxJcuNFl7Wfr6x_VvJ0ENBlhhC_Hedf5al2OplGnNslWnRQT9RVs92RVLevZ_F_ohutgnNsUbK1PVsR04UnQ_A6ea3C0ZLy8FkI" + ] + }, + "Body": "" + } + }, + { + "ID": "ca84fc3a71a55ea8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026predefinedAcl=authenticatedRead\u0026predefinedDefaultObjectAcl=publicRead\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnCCRKNh19fdBAL3pyCzbtLnG7z20pSwSZKl9uTerD6o6-B5mB79mqZRIbXNcUmcpTdpxTreKjvsHK3Y9EKq_5fkzBPInDj8YkjbGViIpd1ldSots" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ed54d82686b4d390", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpXJh1_aAMuDVjYxLZJdwqAt7OwZdmTkc0_5wf_LshhNm_So0q87o7gJ31T58vp1b0_CucSecAyyyOLUbYTDrWDDTZGMkZBpfx1iBArjRtBcud1IgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "224aee5425ffb86e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026predefinedAcl=private\u0026predefinedDefaultObjectAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "33" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOltdLCJkZWZhdWx0T2JqZWN0QWNsIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1113" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:24 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoknWPZdEFnvKZGCGO_Z26ZaKeXNdBzc6k6qqlbGdQXVEtPvLPR2oYtKCXsqDgFBjMw4wfWVXwDfmcc5u5kBAxshpuzKGgCp8UOxv-i_Rml56mzrOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC40MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6ImFsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b6a80796e20baf03", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o?alt=json\u0026predefinedAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJuYW1lIjoicHJpdmF0ZSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1995" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMfGdFW2xElqOJcP4q5XeDjVehJr4FmjcFPij-Y31f_168_Jgb6zibcWxCOJzrV1aUYYTXZW9DMX-BcvfJi6YXoBCPpN23EhrGQyY7tHSb0SVurGY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjQuODQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL3ByaXZhdGUvMTU1NjgzNTk4NDg0ODIwOC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJwcml2YXRlIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODQ4NDgyMDgiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNORHE1djd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "4812f1e0e907256e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026predefinedAcl=private\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1529" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTjsMMIlet4CWUmGoB3TuGzhTHXlDwuUe1SAQO8H2vmbTo2QYYV_WozfDO2NyZpsA9LCtgwwthwxeEeEG182WQTTe3xLWnsVjqPZx-iho8JAgI66U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuNDIzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFJPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTkRxNXY3eC9lRUNFQUk9In0=" + } + }, + { + "ID": "969763dc18755620", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private/rewriteTo/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026destinationPredefinedAcl=publicRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2017" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNk9xKdyuh-I4RkIXqQxMF4A82DZ9b2HYZOdVK7ELOsnMYBqOmrqsY5hzZJsLXMZCZ6zOSzCeefQZuTgYHwyd-jAttJ4QoA1lDk0HTwZqigIfpjCc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9kc3QiLCJuYW1lIjoiZHN0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODU4NTAyNjkiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW47IGNoYXJzZXQ9dXRmLTgiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuODQ5WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI1Ljg0OVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNS44NDlaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdD9nZW5lcmF0aW9uPTE1NTY4MzU5ODU4NTAyNjkmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2RzdC8xNTU2ODM1OTg1ODUwMjY5L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vZHN0L2FjbC9hbGxVc2VycyIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKMy9vLy94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "8db7e2a6b620e794", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp/compose?alt=json\u0026destinationPredefinedAcl=authenticatedRead\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "130" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6InByaXZhdGUifSx7Im5hbWUiOiJkc3QifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1906" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Etag": [ + "CJGwwP/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpqjCYxNFNTu3V1H_kV5WJxkbEYE9I3H5fYjfGMPO7n6C9NJstYZGcIh6xVA5RAxMCwP1PdzXITgTI1m2vm5xP5nhzLVRHKhder2qzYFGsAooH-kG0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9jb21wLzE1NTY4MzU5ODYzMTUyODEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wIiwibmFtZSI6ImNvbXAiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NjMxNTI4MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNi4zMTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjYuMzE0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI2LjMxNFoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vY29tcD9nZW5lcmF0aW9uPTE1NTY4MzU5ODYzMTUyODEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvY29tcC8xNTU2ODM1OTg2MzE1MjgxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoiY29tcCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg2MzE1MjgxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0pHd3dQL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2NvbXAvMTU1NjgzNTk4NjMxNTI4MS9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzN
DAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJjb21wIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODYzMTUyODEiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "72a0ce58c513f6d5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpWOQtHF0cKNME8sTEm9vbS9KKKLLK9afxka1b6TTVGxDjCQ4qAdnA9zshsfVFVKK5hNS10aABAQog_gjYCZTXbbsF4bQNBM0i4R7bK2r7saocPtFo" + ] + }, + "Body": "" + } + }, + { + "ID": "111b5a296b726631", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqPsYXABoYOnGueMXiIvH9AntIPEf4xc6i18Nnr_XEYBxy-3LSK9klIcNyfSB7we4lgbctsiYA2e2WWBJlfqk1GnK6YA2fj-e3IiPbKwBwTSLjr1Jk" + ] + }, + "Body": "" + } + }, + { + "ID": "27e4065cd2000d43", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + 
"quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMPK_b_vcRkSIj1t37L47as9idfpbrNWqzirdNBLwoaGDMPlbjatMzjLThnYRNwk9CqTNQ5H8eoKFbunpe5TMktUo_FHyI7rryoUhbCpkbytRrTeM" + ] + }, + "Body": "" + } + }, + { + "ID": "cccb109f5fba2a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVAiJOaePqfycNhiwbkPOptOqgpe9Tvo0jo1aW2nviozj4-QcjTtnGGyhcdrZ8QTxT-KHPo-saYS9ESC_6osYhChJOCi8b9nXtuqH0BqlvywXs9FE" + ] + }, + "Body": "" + } + }, + { + "ID": "cfab6af30981f62b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/projects/deklerk-sandbox/serviceAccount?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDBTBzZwm7GH6Jklv6hzuaZaO3JdgliJk8zLZkVNDqwDO7tYP-c06rzrTj03llkWuV_l1k9w2tbUcGG2Rtv-1MdozY6pktTutDMLfyhyHe1DaEIys" + ] + }, + "Body": "eyJlbWFpbF9hZGRyZXNzIjoic2VydmljZS00OTYxNjk2MDE3MTRAZ3MtcHJvamVjdC1hY2NvdW50cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImtpbmQiOiJzdG9yYWdlI3NlcnZpY2VBY2NvdW50In0=" + } + }, + { + "ID": "677e783b9aea8c15", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "vcVTjdWWssp9XFk0D3Nk5w==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdWRt6oMzDM98VpA0VDmWRuNvox2ZBw6ZpjbyOFDKCFKgJ92jsLkF8v4M4UUxaTUVpmr8iElUTld7Q2g5_MZSbRatDE2zVgaY-769Ppq7Nx6f0HAg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZ
WxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ff3c05fe02cd38ac", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/some-object", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "\"b4769c7d229f0720beffe7251bbabe8f\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:27:28 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:28 GMT" + ], + "X-Goog-Generation": [ + "1556835988273530" + ], + "X-Goog-Hash": [ + "crc32c=Sm1gKw==", + "md5=tHacfSKfByC+/+clG7q+jw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + 
"X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn6PsKCJsyZAhI_JtSOcVKs7SWuqYsoprQTUagoZXqu1CHWyBc1TzWfUyF9iv5VBS4iZPP5qz0Lf3b51p8O36Sud2-QS2zDg9FKhtm1uBFKLpcTOs" + ] + }, + "Body": "vcVTjdWWssp9XFk0D3Nk5w==" + } + }, + { + "ID": "d1c7cc7c0e1d2829", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqluzdkA1sUAJQJZodwWmM7SP7tNoX2JJjDiqHTy3XtjEqiVDNpI0whWTFJJH4caozRYjJPjTb6_ywm82dnAiYEo-YCknhlvkWexfTq1BM9MRWwQeg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "951f74cbe6b30b67", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoMRpk7F0qXPvUV4uNPdV7VhfUO2Mm-qPBbx3VV92y1wRyElPF1kbRNEGJ1KqbIW5w2XozD7IzjxBx4KevqcPxZr0_E1iU1_NASedJ4lZj4EFoP9UI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "aadc7916a3fc24ad", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11805" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhoAeZtt8ffiWaPr6J0LEaVR3vjerqtJ7gMM9BObOkz_6MtctDuS_s8-DZRxtTZsGfMDtekIKG2v8p2_sb3MxvTgeLhgO4vqvJNIszTaMjjyQcEdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=null, unnamedArguments=[]}, location=entity.resource_id.name, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "03b45b7eac939177", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Caf%C3%A9", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"ade43306cb39336d630e101af5fb51b4\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385878535828" + ], + "X-Goog-Hash": [ + "crc32c=fN3yZg==", + "md5=reQzBss5M21jDhAa9ftRtA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFU2KyS5OA-bAy_MovHXHg5zJZ2kznc6tgTTrSjilrNHiw0NimPHZApcRXOS_ejrSSy8nIn02xipxUWxvHM3ru5IYhJRTeBZxhlq6_XQVsATkvink" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEM=" + } + }, + { + "ID": "725883eeccd4b42c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVybyJ9Cg==", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "CKyp9oDy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UruPh9tBADr1LSQWFlj1C4tjlS6uwlcy_MVRYaTGD4e_qEUlq0iXJx6ns3JHbH7I9olH7aSJpDW6VQcV5EdLwjQMecgA7qKhxzLoYFW4BZdEXbUrro" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLzE1NTY4MzU5ODkyOTYzMDAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvIiwibmFtZSI6Inplcm8iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOS4yOTVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjkuMjk1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI5LjI5NVoiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVybz9nZW5lcmF0aW9uPTE1NTY4MzU5ODkyOTYzMDAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8vMTU1NjgzNTk4OTI5NjMwMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJBQUFBQUE9PSIsImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0=" + } + }, + { + "ID": "036f06773f6d6306", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11821" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqs9uKpqdPwYRv420tAk0KmBT6DS2POPsIj39ROT-r3ymrd2SB-oOJEXuaHGOfih1ckOqp-YrvHHAh7l7A-6rwlFjoBSb4qCtWzXGPjekN0pUBIok8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.bucket, message=null, unnamedArguments=[]}, location=entity.bucket, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "6b9dc540a02ed56f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Cafe%CC%81", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"df597679bac7c6150429ad80a1a05680\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385877705600" + ], + "X-Goog-Hash": [ + "crc32c=qBeWjQ==", + "md5=31l2ebrHxhUEKa2AoaBWgA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrK69XFh4eDvl0RvSevMYIwMVs-nlDelMhoacS-4faSvuEateXMEqzdMvHClWdzkyqcEk7IQ1N0JMcv5uEPt6V98chYK-p9otm8puF4Uf846EBUEmg" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEQ=" + } + }, + { + "ID": "09f71762da168c2f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/zero", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"d41d8cd98f00b204e9800998ecf8427e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:29 GMT" + ], + "X-Goog-Generation": [ + "1556835989296300" + ], + "X-Goog-Hash": [ + "crc32c=AAAAAA==", + "md5=1B2M2Y8AsgTpgAmY7PhCfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "0" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrL3cJhuZXF-arxiYeo4MSf1to_dAoiyMisikOum5rqnrSnFa0OZ25Sl8hUU7lcIeW6P3-dwXK6qv0yHGFYMFnUB0VYpI9BRT16gMo8ikQ_L3gBmew" + ] + }, + "Body": "" + } + }, + { + "ID": "681729281517b9c0", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCNWRWyBsgMgWxsrSjXZMKW22GWVttfUWHw_UNSF7X7hnnz4h-cD7vFzaLO7OVWvnuMyW9qpJAA4eJaERT9hSjGfv41XVw_9ouvBOeIK9ykq-DrE" + ] + }, + "Body": "" + } + }, + { + "ID": "39261fa7e6e9aede", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEt-jSVBZqHHBwfNkeP4NXxMdC1ISsFp2E_M6JiSw9rDj2gTnmTZhrvOn-bo_f9Lx_HMxMDgsXfO0ftWqvUjGwdmHAYNnYK5JSJozLfuosPNxv_S4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "3e70965b0aa1111b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrB-4r560VjwZdR64GS_7_mZDefc-iM5ma_H8KjYfrdNXt-IlkgvzShsy7iNLseN6coOnkJwWT5tLbP54_M7nMUjord_jmpb1i19SBRA_3RQ_64yys" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b80f9a99661cd572", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4ItFRDN2_vB0_Ej095pAE061aywdVAoU_00t3fazm21R5ZLxUHpQL4ifTJ58218EkOeDBSWNaOaWuuzTnTM0DhXTe4l2X3pytOdfcZdIeyLV1rWA" + ] + }, + "Body": "" + } + }, + { + "ID": "979ffb3f3450c3d5", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqM_YYHcsYolLFJJwWhO9AUYmnc3Vp3J6C5AydYOy-0RVAmBxObG8QpwZOsn28vDVllo_OZ1-4bASi_RGi4wdply2CPhzUOCVjP11XNzVYZ4_EiFb4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "4931edd6fbd7e278", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxOq-rTQ-0CFxAKezuPpbibtEmD_yASKzhW5dOiUdwVORx6a7_e1dmDnNCdamDPwLsBOpLueQD8iDbwlracb2uxGwwZ6yz92QD-t-JSKM-jSNp2xA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "12c06b5419a1fa81", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc24sLEBwU00YKpSYT4V6iJu3WxME2LgbyRWaYj4kX8s6Gjatzl0AWcCwPRhj_Bq0BOoUhKQzS8L0GJlhHC36eyEWEIs2rk2BcUyo0aE3U8XbuHuM" + ] + }, + "Body": "" + } + }, + { + "ID": "bf24dffbe6cb8609", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqVkn03LSqmt8RiOPdW3JO4dpsS9vRp7VK8Qc0HwhmS3L9KDOgmyZIqLnvAwuAyvoIz1VsbqeoLwMMtz9pCIX216P4K3OpEs-x_bf_VSwMiFehgZiM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "2e41eac5e693f3e1", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "64746" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgGJECa6hRJfdjE3ErHERxS3HPU_SALrzjf-HIDuv4lfxMvOwrkF2pfY2jUJ3b8TcIjWcnaa85Rs4LBucKffiSWfFXcfLZMF0EuiVRj9oH6Kp4tOg" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1","name":"acl1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864248170","metageneration":"2","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.247Z","updated":"2019-05-02T22:24:25.629Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.247Z","size":"16","md5Hash":"0E9tFNpZj0/WKOJ6fV9paw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?generation=1556835864248170&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COr+pcXx/eECEAI="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"FiDmVg==","etag":"COr+pcXx/eECEAI="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2","name":"acl2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864737946","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.737Z","updated":"2019-05-02T22:24:24.737Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.737Z","size":"16","md5Hash":"c9+O/rg24HTFBc+etWjefg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?generation=1556835864737946&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJrxw8Xx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AtNRtA==","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs","name":"bucketInCopyAttrs","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883663856","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.663Z","updated":"2019-05-02T22:24:43.663Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:43.663Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?generation=1556835883663856&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPCDx87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CPCDx87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object","name":"checksum-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835855962241","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:15.961Z","updated":"2019-05-02T22:24:15.961Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:15.961Z","size":"10","md5Hash":"/F4DjTilcDIIVEHn/nAQsA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?generation=1556835855962241&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CIGhrMHx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Vsu0gA==","etag":"CIGhrMHx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1","name":"composed1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835858962957","metageneration":"1","timeCreated":"2019-05-02T22:24:18.962Z","updated":"2019-05-02T22:24:18.962Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:18.962Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?ge
neration=1556835858962957&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CI2048Lx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CI2048Lx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2","name":"composed2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835859564799","metageneration":"1","contentType":"text/json","timeCreated":"2019-05-02T22:24:19.564Z","updated":"2019-05-02T22:24:19.564Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:19.564Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?generation=1556835859564799&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-8
0633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CP+RiMPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content","name":"content","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835874352207","metageneration":"1","contentType":"image/jpeg","timeCreated":"2019-05-02T22:24:34.351Z","updated":"2019-05-02T22:24:34.351Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:34.351Z","size":"54","md5Hash":"N8p8/s9FwdAAnlvr/lEAjQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?generation=1556835874352207&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CM/Yjsrx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"GoUbsQ==","etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption","name":"customer-encryption","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835875058680","metageneration":"3","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:35.058Z","updated":"2019-05-02T22:24:36.225Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:35.058Z","size":"11","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?generation=1556835875058680&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPjnucrx/eECEAM="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"etag":"CPjnucrx/eECEAM=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2","name":"customer-encryption-2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835880717506","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:40.717Z","updated":"2019-05-02T22:24:40.717Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:40.717Z","size":"11","md5Hash":"xwWNFa0VdXPmlAwrlcAJcg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?generation=1556835880717506&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CMKZk83x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"r0NGrg==","etag":"CMKZk83x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3","name":"customer-encryption-3","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835879859440","metageneration":"1","timeCreated":"2019-05-02T22:24:39.859Z","updated":"2019-05-02T22:24:39.859Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:39.859Z",
"size":"22","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?generation=1556835879859440&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPDp3szx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"componentCount":2,"etag":"CPDp3szx/eECEAE=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test","name":"gzip-test","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835860251867","metageneration":"1","contentType":"application/x-gzip","timeCreated":"2019-05-02T22:24:20.251Z","updated":"2019-05-02T22:24:20.251Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:20.251Z","size":"27","md5Hash":"OtCw+aRRIRqKGFAEOax+qw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integra
tion-test-20190502-80633403432013-0001/o/gzip-test?generation=1556835860251867&alt=media","contentEncoding":"gzip","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNuJssPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"9DhwBA==","etag":"CNuJssPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1","name":"hashesOnUpload-1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835885051680","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:45.051Z","updated":"2019-05-02T22:24:45.051Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:45.051Z","size":"27","md5Hash":"ofZjGlcXPJiGOAfKFbJl1Q==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?generation=1556835885051680&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CKDem8/x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"cH+A+w==","etag":"CKDem8/x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://ww
w.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"4","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:17.121Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/allUsers","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"allUsers","role":"READER","etag":"CLnS+bvx/eECEAQ="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc","name":"posc","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835882760607","metageneration":"1","timeCreated":"2019-05-02T22:24:42.760Z","updated":"2019-05-02T22:24:42.760Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:42.760Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?generation=1556835882760607&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-owners-496169601714","role":"OWN
ER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ/zj87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CJ/zj87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2","name":"posc2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883152770","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.152Z","updated":"2019-05-02T22:24:43.152Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:43.152Z","size":"3","md5Hash":"9WGq9u8L8U1CCLtGpMyzrQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?generation=1556835883152770&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CILrp87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"17qABQ==","etag":"CILrp87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL","name":"signedURL","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835861141206","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:21.140Z","updated":"2019-05-02T22:24:21.140Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:21.140Z","size":"29","md5Hash":"Jyxvgwm9n2MsrGTMPbMeYA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?generation=1556835861141206&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNat6MPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"ZTqALw==","etag":"CNat6MPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object","name":"some-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835988273530","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:26:28.273Z","updated":"2019-05-02T22:26:28.273Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:26:28.273Z","size":"16","md5Hash":"tHacfSKfByC+/+clG7q+jw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?generation=1556835988273530&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPryt4Dy/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Sm1gKw==","etag":"CPryt4Dy/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object","name":"zero-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835856537016","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:16.536Z","updated":"2019-05-02T22:24:16.536Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:16.536Z","size":"0","md5Hash":"1B2M2Y8AsgTpgAmY7PhCfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?generation=1556835856537016&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLirz8Hx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AAAAAA==","etag":"CLirz8Hx/eECEAE="}]}" + } + }, + { + "ID": "744cbe6970c9623b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + 
"application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfI-4jHQ7K_XJuo8cMIfOvPLKerfuQA1KlRDBi4Fq11Uc9i--oKNuFL_MbH0CCxK8PeI4r4fmZILF9hdSGAMMpMmyX0w67j7t84C4tRDx2LnbzG4I" + ] + }, + "Body": "" + } + }, + { + "ID": "a2bea52380600211", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrzTcVhZWJuBZITVX_K6haiVlOGb5fEYRdiiH8ODa4XsVSPl4FCDs7zPpKMTfoZA1veIMd4ccdcj8rUqWDMcRfDU5l4cOGvNhGZV-h9S2AVJfl6GdE" + ] + }, + "Body": "" + } + }, + { + "ID": "e187777c49170a6d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UouPwITheFghtKlzs2vxoAGnEm_V7OZZnYkk_0pLhV8g5YN6TPRS2n1h0iAtzkuFXYtjovp3ifqt_351LO6-CobCar6VgRnJ-_EQ9WgOWhIC9DzYTs" + ] + }, + "Body": "" + } + }, + { + "ID": "653fa745ed683364", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoBLZLK-ePmvYxO2ByvMQ1dXb_4-5yhOR-1nYkxjkINZf5fY9ptO2H4RfRtbIrRQ-QQNZHqZGgCOGJakKLkNgVI2_ExobQLkJ-JaJ-ViDWL9GZCEgU" + ] + }, + "Body": "" + } + }, + { + "ID": "91230665f80c46f7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upasg8dCwbL7dWntUy3O1t_wWVeDNNT3YCz1uVNL8QUNh4z75y0p9OfL57wFX4CUv3NV06Oi9f7a85m42BzgDZl-kqmv4com_INYmRYjKbwrTdFB8A" + ] + }, + "Body": "" + } + }, + { + "ID": "dc68e1f94de4fa2a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqXdUtFVSSFIrK72haTC8pKLogl9pB4lDLpARc8oDKdNt1yfbHj4VAQH0M_Doqj1mLY2LVmThFxvG_1mvtVBm-N4x_J-c5Izq46lZh_xx3wOQEYZKQ" + ] + }, + "Body": "" + } + }, + { + "ID": "db5b5c31ef8e503e", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2nVuDhUpL1-5n7rzvboeHuOBlvh-59eoy59Y5ZIyvUy6FXYj2eCphVTBFkaIPXK7XIS6DJXkPzZABjiFi4hMA7Ewdgp860sYu44qguzhTfOymXYY" + ] + }, + "Body": "" + } + }, + { + "ID": "f44103066254972d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqul73HMDr4p6XCqdfKz99RILwd68VT50DnynneOl5m5E_a7ky2mbMA8mdM6xyAWCQtiJm8dCxvIuKYtjLavA8JUlkGIqqCLGfLbB2EvWahd55Fw_A" + ] + }, + "Body": "" + } + }, + { + "ID": "99aa9c86d4998414", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqfk5JfeEeFo80cy1Y-OxXgzSIbt_x7qDWML2yJopIrt9DsPmRq4cDVFqg6sUFStj6aMrjxP7iu_tbwZzS_3BtnwqfPrJZvHllZ7tdeFTXLmto742o" + ] + }, + "Body": "" + } + }, + { + "ID": "24bd829460fb4df6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprF0LO3CObAApP8iwUO6IvC-Yh-5whtr0F_mMzaj0rltEl4C-AZjNuBMGuQUue-v9oBqb-nQU2ykIenSZ3UYnK4XVV7moFIS43VyP7pByYqsHPkMo" + ] + }, + "Body": "" + } + }, + { + "ID": "acdf7531eddfd72c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAEWwZD_lXrdbrjMg-PYbmLP1-zSzjESAs-osetXJrFTueJ6ufxVfA3xkMG5KjtEz-68YryfKl9gAGsgfseX6H5COwltvyW6pJSdxDdA-dTV_S8eU" + ] + }, + "Body": "" + } + }, + { + "ID": "8facd7ede0dc71a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], 
+ "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrInkaBVPO1ip6mFSpF0Nihd1wYE9HNfIz8HfFsZkiEVG1mX7eKMItsbmNwWMehWZ0qaKAf081imDi7DmKqs6mEy7vyTokMNXVUp5LrFoQ-yDfVRuc" + ] + }, + "Body": "" + } + }, + { + "ID": "6affac5f0a8dea5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhbUYzZ-vUqGcyfzGFaQrr89Q05cXGCB0EbWA2Dy8dmQxB3uWJ7_FUwi6Ish-VMpOrZv78bT9qAHd5Tr5b3s8bTCX6QR9HlelvxkKdyrUv2G8QQuY" + ] + }, + "Body": "" + } + }, + { + "ID": "8258a6e1b5dc73aa", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upola6CIBa9ONjl53PxMpFGQkHvYEau96KG2mgCmUDWDhxZKCw0T58QQRx8OIJW5sisH_acrPcg2o-LLdJgOxCtlVSw-t6wGisVL2-TSopEuezgmu4" + ] + }, + "Body": "" + } + }, + { + "ID": "12186a37ecd09333", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFxLaOTSsPe7hDbzBznGCoZvAUmeNgWBwS45X2j3rpLlk8_MIFFuCa-aBpAM83d23ixudKpklnhiqCcrQQotCUZhkrMLpWLHBJdmcM0d5rCKL6opY" + ] + }, + "Body": "" + } + }, + { + "ID": "e7a92beca78e4ec6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrpITV4ogt1-_eCsp0KFz7tVeM2A4FzSevs8Q0BPZ36nEPrP9aAaB4nfWASzLZgi2hNM1l9eFkf5UbYYWDT5J-nQFCapVNcm_4Nr43yd94Ce95SVps" + ] + }, + "Body": "" + } + }, + { + "ID": "5bb0506f3ca9135d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFBt86Wz7Oqelsb0pPpIskBYdMYLR3XuBmD2F5DwyXt84Q1AUyL0Q94YtqTJP1yzgohHCkJWSGibqUARgP8Ilv9v4lVipsdfjSdFHgIjFJ1H-vuU4" + ] + }, + "Body": "" + } + }, + { + "ID": "3ea05ac204d7a112", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?alt=json\u0026prettyPrint=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2mLLaTTO-0iqVf4m8WXPEsP-mHlaS6hz_gavjGI5yzU0jCjBNDRw1gj6Wa_cQIBzfjytwP2XYhBIJeAglwa6Uqv87WhzeqgGXt4u820FuZKVgY-w" + ] + }, + "Body": "" + } + }, + { + "ID": "bed8580dc40e850e", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UodR7nshMeihBskPB6loBZoNDRFEUxojhFCKhQ0NpiEMRHY0ZHU8ncI4Sr7FHUmIDXyK8xVg_I7Qt5ESrqnGmRzW0h1eM8OGfEM-3weHyzMqN-VMik" + ] + }, + "Body": "" + } + }, + { + "ID": "57489f1d9de0ddb8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqxQ_NSeD-rrAbZXERIaDr3_1wDC_RZ4MyuFZnOhdDqLkn4qub7rAIamqSQKDD8-jbn0c3XmxvowTghxhq4tZaxxHTmYJtB_FPvJxRZtCt3Leugx4g" + ] + }, + "Body": "" + } + }, + { + "ID": "006b3176688c0ac8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4l8LfaJI78MH37sa7nOmY2A8EB4paZccrewnIFFud-pwGLeVf067ZiRaBAOAPsPYlaxxtsr6gtECA6UY0xiUBHPTLFOe4VNnNQpniNWYJEfZ1-I8" + ] + }, + "Body": "" + } + }, + { + "ID": "a853ce25e734dbfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026pageToken=\u0026prefix=go-integration-test\u0026prettyPrint=false\u0026project=deklerk-sandbox\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "47300" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnXhEY-kWgvl-XqP1kXnVDpQphHD9znE-EyBBlFT-kHZtXEsAW5QG5PuzlaJPfQUwZqnTHP0wxu6YDbRCgDMB31UR760_lfEiX88PtVKIdTgCWm5A" + ] + }, + "Body": 
"{"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"go-integration-test-20190502-66597705133146-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66597705133146-0001","timeCreated":"2019-05-02T18:29:58.601Z","updated":"2019-05-02T18:29:58.601Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66611506907602-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66611506907602-0001","timeCreated":"2019-05-02T18:30:12.264Z","updated":"2019-05-02T18:30:12.264Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-editors-496169601714","bu
cket":"go-integration-test-20190502-66611506907602-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66633965420818-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66633965420818-0001","timeCreated":"2019-05-02T18:30:34.662Z","updated":"2019-05-02T18:30:34.662Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projec
tNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-78294113514519-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-78294113514519-0001","timeCreated":"2019-05-02T21:44:54.960Z","updated":"2019-05-02T21:44:54.960Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0017","timeCreated":"2019-05-02T22:05:34.375Z","updated":"2019-05-02T22:05:36.341Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79
385133382825-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:34.375Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0018","timeCreated":"2019-05-02T22:05:37.733Z","updated":"2019-05-02T22:05:37.733Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE=
"},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:37.733Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79911595924903-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79911595924903-0001","timeCreated":"2019-05-02T22:11:52.358Z","updated":"2019-05-02T22:11:52.358Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0001","timeCreated":"2019-05-02T22:12:09.706Z","updated":"2019-05-02T22:13:41.634Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-editors-496169601714","selfLink":"https:/
/www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"l1":"v2","new":"new"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0017","timeCreated":"2019-05-02T22:14:17.486Z","updated":"2019-05-02T22:14:18.905Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/ac
l/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:17.486Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0018","timeCreated":"2019-05-02T22:14:20.105Z","updated":"2019-05-02T22:14:20.105Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:20.105Z"},"storageClass":"STANDARD","etag":"CAE="},{
"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0001","timeCreated":"2019-05-02T22:18:47.398Z","updated":"2019-05-02T22:18:47.398Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0002","timeCreated":"2019-05-02T22:18:48.114Z","updated":"2019-05-02T22:18:48.114Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-8
0326589403446-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0001","timeCreated":"2019-05-02T22:18:54.701Z","updated":"2019-05-02T22:18:54.701Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewe
rs"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0002","timeCreated":"2019-05-02T22:18:55.293Z","updated":"2019-05-02T22:18:55.293Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0001","timeCreated":"2019-05-02T22:20:46.897Z","updated":"2019-05-02T22:22:07.429Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-owners
-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"new":"new","l1":"v2"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0017","timeCreated":"2019-05-02T22:22:50.428Z","updated":"2019-05-02T22:22:51.935Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="}
,{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:50.428Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0018","timeCreated":"2019-05-02T22:22:52.962Z","updated":"2019-05-02T22:22:52.962Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:52.962Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0017","timeCreated":"2019-05-02T22:26:01.908Z","updated":"2019-05-02T22:26:03.544Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","i
d":"go-integration-test-20190502-80633403432013-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:01.908Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0018","timeCreated":"2019-05-02T22:26:15.097Z","updated":"2019-05-02T22:26:15.097Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.
com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:15.097Z"},"storageClass":"STANDARD","etag":"CAE="}]}" + } + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 000000000..a11659212 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,260 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "unicode/utf8" + + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + // SendCRC specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Writer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data written does not match the checksum, + // the write will be rejected. + SendCRC32C bool + + // ChunkSize controls the maximum number of bytes of the object that the + // Writer will attempt to send to the server in a single request. Objects + // smaller than the size will be sent in a single request, while larger + // objects will be split over multiple requests. The size will be rounded up + // to the nearest multiple of 256K. If zero, chunking will be disabled and + // the object will be uploaded in a single request. + // + // ChunkSize will default to a reasonable value. If you perform many concurrent + // writes of small objects, you may wish set ChunkSize to a value that matches + // your objects' sizes to avoid consuming large amounts of memory. + // + // ChunkSize must be set before the first Write call. 
+ ChunkSize int + + // ProgressFunc can be used to monitor the progress of a large write. + // operation. If ProgressFunc is not nil and writing requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), + // then ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far. + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(int64) + + ctx context.Context + o *ObjectHandle + + opened bool + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + obj *ObjectAttrs + + mu sync.Mutex + err error +} + +func (w *Writer) open() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). + if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } + pr, pw := io.Pipe() + w.pw = pw + w.opened = true + + go w.monitorCancel() + + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(w.ChunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + + go func() { + defer close(w.donec) + + rawObj := attrs.toRawObject(w.o.bucket) + if w.SendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if w.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) + } + if w.o.c.envHost != "" { + w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost) + } + call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(w.ctx) + + if w.ProgressFunc != nil { + call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) + } + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { + w.mu.Lock() + w.err = err + w.mu.Unlock() + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", w.o.gen, w.o.conds, call) + if err == nil { + if w.o.userProject != "" { + call.UserProject(w.o.userProject) + } + setClientHeader(call.Header()) + + // The internals that perform call.Do automatically retry + // uploading chunks, hence no need to add retries here. + // See issue https://github.com/googleapis/google-cloud-go/issues/1507. + // + // However, since this whole call's internals involve making the initial + // resumable upload session, the first HTTP request is not retried. + // TODO: Follow-up with google.golang.org/gensupport to solve + // https://github.com/googleapis/google-api-go-client/issues/392. + resp, err = call.Do() + } + if err != nil { + w.mu.Lock() + w.err = err + w.mu.Unlock() + pr.CloseWithError(err) + return + } + w.obj = newObject(resp) + }() + return nil +} + +// Write appends to w. It implements the io.Writer interface. 
+// +// Since writes happen asynchronously, Write may return a nil +// error even though the write failed (or will fail). Always +// use the error returned from Writer.Close to determine if +// the upload was successful. +func (w *Writer) Write(p []byte) (n int, err error) { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + if werr != nil { + return 0, werr + } + if !w.opened { + if err := w.open(); err != nil { + return 0, err + } + } + n, err = w.pw.Write(p) + if err != nil { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + // Preserve existing functionality that when context is canceled, Write will return + // context.Canceled instead of "io: read/write on closed pipe". This hides the + // pipe implementation detail from users and makes Write seem as though it's an RPC. + if werr == context.Canceled || werr == context.DeadlineExceeded { + return n, werr + } + } + return n, err +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.open(); err != nil { + return err + } + } + + // Closing either the read or write causes the entire pipe to close. + if err := w.pw.Close(); err != nil { + return err + } + + <-w.donec + w.mu.Lock() + defer w.mu.Unlock() + return w.err +} + +// monitorCancel is intended to be used as a background goroutine. It monitors the +// context, and when it observes that the context has been canceled, it manually +// closes things that do not take a context. +func (w *Writer) monitorCancel() { + select { + case <-w.ctx.Done(): + w.mu.Lock() + werr := w.ctx.Err() + w.err = werr + w.mu.Unlock() + + // Closing either the read or write causes the entire pipe to close. + w.CloseWithError(werr) + case <-w.donec: + } +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +// +// Deprecated: cancel the context passed to NewWriter instead. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. 
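The Write, Close, and Attrs doc comments above spell out the asynchronous-upload contract, but the diff naturally contains no caller. A minimal usage sketch, editorial and not part of the vendored file (bucket and object names are placeholders; credentials are assumed to come from Application Default Credentials):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("example-bucket").Object("example.txt").NewWriter(ctx)
	w.ContentType = "text/plain"
	w.ChunkSize = 256 * 1024 // already a multiple of 256K; 0 would disable chunking
	w.ProgressFunc = func(n int64) { log.Printf("uploaded %d bytes", n) }

	// Because the upload runs asynchronously, Write may succeed even if the
	// upload later fails; the error from Close is authoritative.
	if _, err := w.Write([]byte("hello, world")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	// Attrs is only valid after a successful Close.
	log.Printf("wrote object %q (%d bytes)", w.Attrs().Name, w.Attrs().Size)
}
```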
+func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore new file mode 100644 index 000000000..4473da19b --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.gitignore @@ -0,0 +1,53 @@ +# Ignore docs files +_gh_pages +_site + +# Ignore temporary files +README.html +coverage.out +.tmp + +# Numerous always-ignore extensions +*.diff +*.err +*.log +*.orig +*.rej +*.swo +*.swp +*.vi +*.zip +*~ + +# OS or Editor folders +._* +.cache +.DS_Store +.idea +.project +.settings +.tmproj +*.esproj +*.sublime-project +*.sublime-workspace +nbproject +Thumbs.db + +# Komodo +.komodotools +*.komodoproject + +# SCSS-Lint +scss-lint-report.xml + +# grunt-contrib-sass cache +.sass-cache + +# Jekyll metadata +docs/.jekyll-metadata + +# Folders to ignore +.build +.test +bower_components +node_modules diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml new file mode 100644 index 000000000..a51a14466 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -0,0 +1,25 @@ +language: go +sudo: false +matrix: + fast_finish: true + include: + - go: 1.11.x + env: TEST_METHOD=goveralls + - go: 1.10.x + - go: tip + - go: 1.9.x + - go: 1.8.x + - go: 1.7.x + - go: 1.6.x + - go: 1.5.x + allow_failures: + - go: tip + - go: 1.9.x + - go: 1.8.x + - go: 1.7.x + - go: 1.6.x + - go: 1.5.x +script: ./test.sh $TEST_METHOD +notifications: + email: + on_success: never diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 000000000..716561d5d --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 000000000..726c2afb3 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 000000000..eaffaab94 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md new file mode 100644 index 000000000..9e4255879 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/README.md @@ -0,0 +1,38 @@ +# A Go package for calculating the Levenshtein distance between two strings + +[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  +[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) +[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) +[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) + + +This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). + +## Project Status + +v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. + +This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. + +## Overview + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
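The Overview describes `Distance` and `Similarity` only in prose. As a rough editorial sketch (not part of the vendored README; the strings and threshold are arbitrary), the two entry points can be exercised like this, with `nil` selecting the default parameters:

```go
package main

import (
	"fmt"

	"github.com/agext/levenshtein"
)

func main() {
	// Default parameters: every edit operation costs 1, no maximum cost.
	fmt.Println(levenshtein.Distance("kitten", "sitting", nil)) // 3

	// Similarity normalizes the distance into the 0..1 range.
	fmt.Printf("%.2f\n", levenshtein.Similarity("kitten", "sitting", nil)) // ~0.57

	// A minimum score lets the calculation bail out early on dissimilar
	// strings; anything below the threshold is reported as 0.
	p := levenshtein.NewParams().MinScore(0.8)
	fmt.Printf("%.2f\n", levenshtein.Similarity("kitten", "sitting", p)) // 0
}
```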
+ +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. + +## Installation + +``` +go get github.com/agext/levenshtein +``` + +## License + +Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/go.mod b/vendor/github.com/agext/levenshtein/go.mod new file mode 100644 index 000000000..545d432be --- /dev/null +++ b/vendor/github.com/agext/levenshtein/go.mod @@ -0,0 +1 @@ +module github.com/agext/levenshtein diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 000000000..df69ce701 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". 
It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 000000000..a85727b3e --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
+func (p *Params) BonusThreshold(v float64) *Params { + if v >= 0 { + p.bonusThreshold = v + } + return p +} diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh new file mode 100644 index 000000000..c5ed72466 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/test.sh @@ -0,0 +1,10 @@ +set -ev + +if [[ "$1" == "goveralls" ]]; then + echo "Testing with goveralls..." + go get github.com/mattn/goveralls + $HOME/gopath/bin/goveralls -service=travis-ci +else + echo "Testing with go test..." + go test -v ./... +fi diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE new file mode 100644 index 000000000..212537886 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go new file mode 100644 index 000000000..ed749d4c5 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -0,0 +1,218 @@ +// Package cidr is a collection of assorted utilities for computing +// network and host addresses within network ranges. +// +// It expects a CIDR-type address structure where addresses are divided into +// some number of prefix bits representing the network and then the remaining +// suffix bits represent the host. +// +// For example, it can help to calculate addresses for sub-networks of a +// parent network, or to calculate host addresses within a particular prefix. +// +// At present this package is prioritizing simplicity of implementation and +// de-prioritizing speed and memory usage. Thus caution is advised before +// using this package in performance-critical applications or hot codepaths. +// Patches to improve the speed and memory usage may be accepted as long as +// they do not result in a significant increase in code complexity. +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +// Subnet takes a parent CIDR range and creates a subnet within it +// with the given number of additional prefix bits and the given +// network number. +// +// For example, 10.3.0.0/16, extended by 8 bits, with a network number +// of 5, becomes 10.3.5.0/24 . 
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	newPrefixLen := parentLen + newBits
+
+	if newPrefixLen > addrLen {
+		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+	}
+
+	maxNetNum := uint64(1<<uint64(newBits)) - 1
+	if uint64(num) > maxNetNum {
+		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+	}
+
+	return &net.IPNet{
+		IP:   insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen),
+		Mask: net.CIDRMask(newPrefixLen, addrLen),
+	}, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+	ip := base.IP
+	mask := base.Mask
+	bigNum := big.NewInt(int64(num))
+
+	parentLen, addrLen := mask.Size()
+	hostLen := addrLen - parentLen
+
+	maxHostNum := big.NewInt(int64(1))
+	maxHostNum.Lsh(maxHostNum, uint(hostLen))
+	maxHostNum.Sub(maxHostNum, big.NewInt(1))
+
+	numUint64 := big.NewInt(int64(bigNum.Uint64()))
+	if bigNum.Cmp(big.NewInt(0)) == -1 {
+		numUint64.Neg(bigNum)
+		numUint64.Sub(numUint64, big.NewInt(int64(1)))
+		bigNum.Sub(maxHostNum, numUint64)
+	}
+
+	if numUint64.Cmp(maxHostNum) == 1 {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+	var bitlength int
+	if ip.To4() != nil {
+		bitlength = 32
+	} else {
+		bitlength = 128
+	}
+	return insertNumIntoIP(ip, bigNum, bitlength), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+	// the first IP is easy
+	firstIP := network.IP
+
+	// the last IP is the network address OR NOT the mask address
+	prefixLen, bits := network.Mask.Size()
+	if prefixLen == bits {
+		// Easy!
+		// But make sure that our two slices are distinct, since they
+		// would be in all other cases.
+		lastIP := make([]byte, len(firstIP))
+		copy(lastIP, firstIP)
+		return firstIP, lastIP
+	}
+
+	firstIPInt, bits := ipToInt(firstIP)
+	hostLen := uint(bits) - uint(prefixLen)
+	lastIPInt := big.NewInt(1)
+	lastIPInt.Lsh(lastIPInt, hostLen)
+	lastIPInt.Sub(lastIPInt, big.NewInt(1))
+	lastIPInt.Or(lastIPInt, firstIPInt)
+
+	return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
+func AddressCount(network *net.IPNet) uint64 { + prefixLen, bits := network.Mask.Size() + return 1 << (uint64(bits) - uint64(prefixLen)) +} + +//VerifyNoOverlap takes a list subnets and supernet (CIDRBlock) and verifies +//none of the subnets overlap and all subnets are in the supernet +//it returns an error if any of those conditions are not satisfied +func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { + firstLastIP := make([][]net.IP, len(subnets)) + for i, s := range subnets { + first, last := AddressRange(s) + firstLastIP[i] = []net.IP{first, last} + } + for i, s := range subnets { + if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { + return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) + } + for j := 0; j < len(subnets); j++ { + if i == j { + continue + } + + first := firstLastIP[j][0] + last := firstLastIP[j][1] + if s.Contains(first) || s.Contains(last) { + return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) + } + } + } + return nil +} + +// PreviousSubnet returns the subnet of the desired mask in the IP space +// just lower than the start of IPNet provided. If the IP space rolls over +// then the second return value is true +func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + startIP := checkIPv4(network.IP) + previousIP := make(net.IP, len(startIP)) + copy(previousIP, startIP) + cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) + previousIP = Dec(previousIP) + previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} + if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { + return previous, true + } + return previous, false +} + +// NextSubnet returns the next available subnet of the desired mask size +// starting for the maximum IP of the offset subnet +// If the IP exceeds the maxium IP then the second return value is true +func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + _, currentLast := AddressRange(network) + mask := net.CIDRMask(prefixLen, 8*len(currentLast)) + currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} + _, last := AddressRange(currentSubnet) + last = Inc(last) + next := &net.IPNet{IP: last.Mask(mask), Mask: mask} + if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { + return next, true + } + return next, false +} + +//Inc increases the IP by one this returns a new []byte for the IP +func Inc(IP net.IP) net.IP { + IP = checkIPv4(IP) + incIP := make([]byte, len(IP)) + copy(incIP, IP) + for j := len(incIP) - 1; j >= 0; j-- { + incIP[j]++ + if incIP[j] > 0 { + break + } + } + return incIP +} + +//Dec decreases the IP by one this returns a new []byte for the IP +func Dec(IP net.IP) net.IP { + IP = checkIPv4(IP) + decIP := make([]byte, len(IP)) + copy(decIP, IP) + decIP = checkIPv4(decIP) + for j := len(decIP) - 1; j >= 0; j-- { + decIP[j]-- + if decIP[j] < 255 { + break + } + } + return decIP +} + +func checkIPv4(ip net.IP) net.IP { + // Go for some reason allocs IPv6len for IPv4 so we have to correct it + if v4 := ip.To4(); v4 != nil { + return v4 + } + return ip +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go new file mode 100644 index 000000000..e5e6a2cf9 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -0,0 +1,37 @@ +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +func ipToInt(ip net.IP) (*big.Int, int) { + val := &big.Int{} + 
val.SetBytes([]byte(ip)) + if len(ip) == net.IPv4len { + return val, 32 + } else if len(ip) == net.IPv6len { + return val, 128 + } else { + panic(fmt.Errorf("Unsupported address length %d", len(ip))) + } +} + +func intToIP(ipInt *big.Int, bits int) net.IP { + ipBytes := ipInt.Bytes() + ret := make([]byte, bits/8) + // Pack our IP bytes into the end of the return array, + // since big.Int.Bytes() removes front zero padding. + for i := 1; i <= len(ipBytes); i++ { + ret[len(ret)-i] = ipBytes[len(ipBytes)-i] + } + return net.IP(ret) +} + +func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { + ipInt, totalBits := ipToInt(ip) + bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) + ipInt.Or(ipInt, bigNum) + return intToIP(ipInt, totalBits) +} diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE new file mode 100644 index 000000000..684b03b4a --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE @@ -0,0 +1,95 @@ +Copyright (c) 2017 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------- + +Unicode table generation programs are under a separate copyright and license: + +Copyright (c) 2014 Couchbase, Inc. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the +License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See the License for the specific language governing permissions +and limitations under the License. + +--------- + +Grapheme break data is provided as part of the Unicode character database, +copright 2016 Unicode, Inc, which is provided with the following license: + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. 
+ +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2017 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder. diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 000000000..5752e9ef8 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. 
+func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 000000000..81f3a7471 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 000000000..012bc690a --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 
1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 
4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 
7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 
10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 
176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 
190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 
191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 
184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 
130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 
157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 
185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 182, 184, 
185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 
129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 
178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 
135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 178, 179, 
182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 
128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 
190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 
146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 130, 131, 135, + 191, 129, 134, 136, 190, 128, 159, 160, + 191, 128, 175, 176, 255, 10, 13, 127, + 194, 216, 219, 220, 224, 225, 226, 234, + 235, 236, 237, 239, 240, 243, 0, 31, + 128, 191, 192, 223, 227, 238, 241, 247, + 248, 255, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 194, 216, + 219, 220, 224, 225, 226, 234, 235, 236, + 237, 239, 240, 243, 32, 126, 192, 223, + 227, 238, 241, 247, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 235, 236, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 237, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 235, 236, 237, 239, 240, + 243, 204, 205, 210, 
214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 237, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, +} + +var _graphclust_single_lengths []byte = []byte{ + 0, 1, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 2, 3, 2, 2, 2, 3, + 2, 2, 3, 3, 1, 2, 4, 2, + 2, 4, 4, 2, 0, 2, 0, 3, + 1, 0, 1, 21, 1, 0, 4, 0, + 0, 0, 1, 2, 0, 1, 1, 1, + 4, 0, 3, 1, 3, 2, 0, 3, + 0, 5, 2, 0, 0, 1, 0, 2, + 0, 0, 15, 0, 0, 0, 4, 0, + 0, 0, 3, 1, 0, 4, 1, 4, + 4, 3, 1, 0, 7, 5, 1, 1, + 0, 1, 0, 23, 1, 0, 1, 1, + 1, 1, 0, 2, 1, 3, 2, 0, + 1, 3, 1, 2, 0, 1, 0, 2, + 1, 2, 3, 4, 0, 0, 0, 1, + 0, 6, 2, 0, 0, 0, 0, 1, + 3, 0, 0, 0, 1, 0, 1, 4, + 0, 0, 0, 1, 1, 1, 4, 0, + 0, 0, 6, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 6, + 0, 1, 0, 1, 0, 2, 0, 0, + 15, 0, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0, 2, 1, 1, 0, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 0, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 3, 0, 0, 0, 0, + 1, 1, 0, 1, 0, 1, 0, 0, + 0, 29, 0, 0, 0, 3, 2, 3, + 2, 2, 2, 3, 2, 2, 3, 3, + 1, 2, 4, 2, 2, 4, 4, 2, + 0, 2, 0, 3, 1, 0, 1, 21, + 1, 0, 4, 0, 0, 0, 1, 2, + 0, 1, 1, 1, 4, 0, 3, 1, + 3, 2, 0, 3, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 4, 0, 0, 0, 3, 1, + 0, 4, 1, 4, 4, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 1, 0, 1, 1, 1, 1, 0, 2, + 1, 3, 2, 
0, 1, 3, 1, 2, + 0, 1, 0, 2, 1, 2, 3, 4, + 0, 0, 0, 1, 0, 6, 2, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 1, 0, 1, 4, 0, 0, 0, 1, + 1, 1, 4, 0, 0, 0, 6, 0, + 0, 0, 1, 1, 2, 1, 1, 5, + 0, 24, 0, 24, 0, 0, 23, 0, + 0, 1, 0, 2, 0, 0, 0, 28, + 0, 3, 23, 2, 0, 2, 2, 3, + 2, 2, 2, 0, 54, 54, 27, 1, + 0, 5, 2, 0, 1, 1, 0, 0, + 14, 0, 3, 2, 2, 3, 2, 2, + 2, 54, 54, 27, 1, 0, 2, 0, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 7, 1, 0, 1, 0, + 2, 3, 2, 1, 0, 1, 1, 3, + 0, 1, 3, 0, 1, 1, 2, 1, + 1, 5, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 24, 0, 24, 0, + 0, 23, 0, 0, 1, 0, 2, 0, + 0, 0, 28, 0, 3, 23, 2, 0, + 2, 2, 3, 2, 2, 2, 0, 54, + 54, 27, 1, 1, 5, 2, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 2, 1, 1, 0, 3, 1, + 0, 6, 5, 1, 1, 0, 1, 0, + 23, 0, 0, 0, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 1, 0, 6, 0, + 0, 0, 0, 0, 1, 3, 0, 0, + 0, 1, 4, 0, 0, 0, 6, 1, + 7, 3, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 29, + 0, 0, 0, 3, 2, 3, 2, 2, + 2, 3, 2, 2, 3, 3, 1, 2, + 4, 2, 2, 4, 4, 2, 0, 2, + 0, 3, 1, 0, 1, 21, 1, 0, + 4, 0, 0, 0, 1, 2, 0, 1, + 1, 1, 4, 0, 3, 1, 3, 2, + 0, 3, 0, 5, 2, 0, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 4, 0, 0, 0, 3, 1, 0, 4, + 1, 4, 4, 3, 1, 0, 7, 5, + 1, 1, 0, 1, 0, 23, 1, 0, + 1, 1, 1, 1, 0, 2, 1, 3, + 2, 0, 1, 3, 1, 2, 0, 1, + 0, 2, 1, 2, 3, 4, 0, 0, + 0, 1, 0, 6, 2, 0, 0, 0, + 0, 1, 3, 0, 0, 0, 1, 0, + 1, 4, 0, 0, 0, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 1, 1, 1, 4, 0, 0, 0, + 6, 2, 3, 2, 2, 2, 3, 2, + 2, 3, 3, 1, 2, 4, 2, 2, + 4, 4, 2, 0, 2, 0, 3, 1, + 0, 1, 21, 1, 0, 4, 0, 0, + 0, 1, 2, 0, 1, 1, 1, 4, + 0, 3, 1, 3, 2, 0, 3, 0, + 5, 2, 0, 0, 1, 0, 2, 0, + 0, 15, 0, 0, 0, 4, 0, 0, + 0, 3, 1, 0, 4, 1, 4, 4, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 1, 0, 1, 1, 1, + 1, 0, 2, 1, 3, 2, 0, 1, + 3, 1, 2, 0, 1, 0, 2, 1, + 2, 3, 4, 0, 0, 0, 1, 0, + 6, 2, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 1, 0, 1, 4, 0, + 0, 0, 1, 0, 0, 14, 0, 3, + 2, 2, 
3, 2, 2, 2, 54, 54, + 29, 1, 0, 0, 0, 0, 2, 1, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 7, 1, 0, 1, + 0, 2, 3, 2, 1, 0, 1, 1, + 3, 0, 1, 5, 0, 0, 17, 20, + 20, 20, 14, 20, 20, 20, 23, 21, + 21, 21, 20, 23, 20, 20, 20, 21, + 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, +} + +var _graphclust_range_lengths []byte = []byte{ + 0, 0, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 5, 2, 6, 2, 8, 4, + 2, 5, 0, 3, 2, 4, 1, 6, + 2, 4, 4, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 4, 4, 1, 1, + 2, 2, 2, 2, 1, 1, 6, 2, + 5, 1, 3, 3, 4, 4, 4, 4, + 2, 0, 0, 1, 1, 0, 1, 0, + 1, 1, 0, 2, 1, 1, 2, 4, + 1, 2, 4, 1, 5, 0, 3, 2, + 1, 0, 0, 2, 0, 0, 0, 0, + 1, 4, 1, 0, 2, 1, 4, 2, + 0, 4, 3, 4, 2, 2, 6, 2, + 2, 4, 1, 4, 2, 4, 1, 3, + 3, 2, 2, 0, 1, 1, 1, 0, + 1, 0, 3, 3, 1, 2, 2, 2, + 0, 5, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 2, 2, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 0, + 1, 0, 1, 0, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 2, 2, 1, + 1, 2, 2, 1, 1, 3, 2, 2, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 2, 2, 0, + 2, 2, 1, 1, 2, 6, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 2, + 2, 0, 1, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 1, 1, 4, 1, 1, 1, + 1, 1, 4, 1, 2, 2, 5, 2, + 6, 2, 8, 4, 2, 5, 0, 3, + 2, 4, 1, 6, 2, 4, 4, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 4, 4, 1, 1, 2, 2, 2, 2, + 1, 1, 6, 2, 5, 1, 3, 3, + 4, 4, 4, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 2, 4, 1, 2, 4, 1, + 5, 0, 3, 2, 1, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 4, 2, 0, 4, 3, 4, + 2, 2, 6, 2, 2, 4, 1, 4, + 2, 4, 1, 3, 3, 2, 2, 0, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 2, 3, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 1, 0, 1, + 1, 0, 1, 0, 1, 3, 1, 2, + 2, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 1, 1, 2, 2, + 2, 1, 3, 2, 1, 1, 3, 1, + 3, 3, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 2, 2, 4, 1, 1, + 2, 1, 1, 1, 3, 1, 2, 1, + 2, 1, 2, 0, 0, 1, 1, 5, + 9, 2, 1, 3, 5, 3, 1, 6, + 1, 1, 1, 1, 1, 1, 
1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 1, 0, 1, + 1, 0, 1, 1, 0, 1, 0, 1, + 3, 1, 2, 2, 1, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 1, + 1, 2, 2, 1, 1, 5, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 2, 2, 1, 1, 2, + 2, 1, 1, 3, 2, 2, 0, 0, + 2, 0, 0, 0, 0, 1, 4, 1, + 0, 2, 1, 2, 2, 0, 2, 2, + 1, 1, 2, 6, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 0, 1, 0, 3, + 3, 1, 2, 2, 2, 0, 5, 1, + 1, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 1, + 4, 1, 2, 2, 5, 2, 6, 2, + 8, 4, 2, 5, 0, 3, 2, 4, + 1, 6, 2, 4, 4, 1, 1, 2, + 1, 2, 1, 4, 0, 0, 4, 4, + 1, 1, 2, 2, 2, 2, 1, 1, + 6, 2, 5, 1, 3, 3, 4, 4, + 4, 4, 2, 0, 0, 1, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 2, 4, 1, 2, 4, 1, 5, 0, + 3, 2, 1, 0, 0, 2, 0, 0, + 0, 0, 1, 4, 1, 0, 2, 1, + 4, 2, 0, 4, 3, 4, 2, 2, + 6, 2, 2, 4, 1, 4, 2, 4, + 1, 3, 3, 2, 2, 0, 1, 1, + 1, 0, 1, 0, 3, 3, 1, 2, + 2, 2, 0, 5, 1, 1, 0, 1, + 0, 1, 1, 1, 0, 0, 0, 3, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 1, 0, + 0, 5, 2, 6, 2, 8, 4, 2, + 5, 0, 3, 2, 4, 1, 6, 2, + 4, 4, 1, 1, 2, 1, 2, 1, + 4, 0, 0, 4, 4, 1, 1, 2, + 2, 2, 2, 1, 1, 6, 2, 5, + 1, 3, 3, 4, 4, 4, 4, 2, + 0, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 2, 1, 1, 2, 4, 1, + 2, 4, 1, 5, 0, 3, 2, 1, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 4, 2, 0, + 4, 3, 4, 2, 2, 6, 2, 2, + 4, 1, 4, 2, 4, 1, 3, 3, + 2, 2, 0, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 0, 1, 1, + 1, 0, 1, 3, 1, 3, 3, 1, + 0, 0, 0, 0, 0, 1, 1, 1, + 3, 2, 4, 1, 0, 1, 1, 1, + 3, 1, 1, 1, 3, 1, 3, 1, + 3, 1, 2, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 
0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 5, 9, 2, 1, 3, 5, 3, 1, + 6, 1, 1, 2, 2, 2, 6, 0, + 0, 0, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 8, 11, 15, + 17, 20, 25, 28, 30, 32, 34, 63, + 68, 70, 73, 76, 80, 84, 90, 97, + 102, 106, 112, 115, 120, 123, 129, 135, + 138, 144, 146, 152, 155, 157, 161, 163, + 169, 171, 176, 178, 200, 203, 207, 212, + 214, 217, 220, 222, 225, 227, 230, 233, + 235, 241, 243, 246, 249, 252, 254, 256, + 262, 265, 271, 274, 281, 283, 285, 287, + 289, 291, 294, 296, 298, 314, 317, 319, + 321, 326, 329, 332, 334, 336, 339, 342, + 344, 348, 353, 357, 360, 364, 366, 369, + 377, 383, 385, 387, 389, 395, 397, 421, + 424, 426, 429, 432, 434, 437, 440, 443, + 445, 449, 457, 459, 461, 463, 465, 468, + 471, 473, 475, 477, 480, 483, 488, 490, + 492, 494, 496, 498, 500, 507, 511, 515, + 517, 520, 523, 527, 531, 537, 539, 541, + 545, 547, 549, 551, 553, 556, 560, 562, + 565, 570, 573, 575, 577, 579, 610, 615, + 617, 620, 626, 634, 640, 649, 654, 665, + 673, 678, 686, 690, 697, 701, 708, 714, + 723, 728, 737, 746, 750, 752, 757, 759, + 765, 768, 773, 775, 797, 803, 808, 814, + 816, 819, 822, 826, 831, 833, 836, 844, + 848, 858, 860, 867, 872, 880, 887, 892, + 900, 903, 909, 912, 914, 916, 918, 920, + 923, 925, 927, 943, 946, 948, 950, 957, + 962, 964, 967, 975, 978, 984, 989, 994, + 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, + 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, + 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, + 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, + 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, + 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, + 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, + 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, + 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, + 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, + 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, + 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, + 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, + 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, + 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, + 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, + 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, + 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, + 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, + 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, + 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, + 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, + 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, + 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, + 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, + 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, + 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, + 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, + 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, + 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, + 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, + 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, + 
2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, + 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, + 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, + 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, + 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, + 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, + 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, + 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, + 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, + 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, + 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, + 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, + 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, + 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, + 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, + 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, + 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, + 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, + 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, + 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, + 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, + 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, + 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, + 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, + 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, + 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, + 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, + 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, + 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, + 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, + 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, + 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, + 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, + 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, + 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, + 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, + 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, + 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, + 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, + 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, + 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, + 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, + 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, + 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, + 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, + 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, + 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, + 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, + 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, + 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, + 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, + 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, + 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, + 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, + 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, + 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, + 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, + 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, + 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, + 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, + 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, + 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, + 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, + 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, + 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, + 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, + 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, + 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, + 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, + 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, + 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, + 
5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, + 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, + 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, + 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, + 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, + 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, + 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, + 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, + 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, + 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, + 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, + 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, + 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, + 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, + 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, + 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, + 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, + 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, + 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, + 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, + 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, + 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, + 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, + 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, + 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, + 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, + 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, + 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, + 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, + 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, + 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, + 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, + 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, + 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, + 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, + 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, + 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, + 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, + 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, + 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, + 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, + 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, + 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, + 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, + 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, + 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, + 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, + 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, + 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, + 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, + 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, + 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, + 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, + 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, + 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, + 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, + 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, + 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, + 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, + 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, + 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, + 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, + 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, + 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, + 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, + 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, + 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, + 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, + 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, + 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, + 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, + 
7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, + 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, + 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, + 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, + 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, + 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, + 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, + 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, + 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, + 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, + 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, + 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, + 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, + 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, + 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, + 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, + 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, + 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, + 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, + 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, + 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, + 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, + 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, + 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, + 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, + 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, + 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, + 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, + 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, + 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, + 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, + 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, + 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, + 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, + 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, + 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, + 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, + 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, + 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, + 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, + 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, + 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, + 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, + 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, + 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, + 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, + 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, + 9718, 9739, 9760, 9781, +} + +var _graphclust_indicies []int16 = []int16{ + 0, 1, 3, 2, 2, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 2, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 2, 2, 3, 3, 2, + 3, 2, 4, 5, 6, 7, 8, 10, + 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 9, 13, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 2, 2, 3, 2, 2, 2, 3, + 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 2, 2, 2, 2, 2, 2, + 3, 2, 2, 2, 2, 3, 3, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 3, 2, + 3, 3, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 2, 3, + 3, 2, 2, 2, 2, 2, 2, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 2, + 3, 2, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 3, 3, 2, 3, 2, 2, 2, + 3, 3, 2, 3, 3, 2, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 2, 2, 2, + 3, 3, 3, 2, 3, 2, 3, 2, + 3, 3, 3, 3, 3, 2, 3, 3, + 2, 53, 54, 55, 56, 57, 2, 3, + 58, 2, 53, 54, 59, 55, 56, 57, + 2, 3, 2, 3, 2, 3, 2, 3, + 2, 3, 2, 60, 61, 2, 3, 2, + 3, 2, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, + 76, 2, 3, 3, 2, 3, 2, 3, + 2, 3, 3, 3, 3, 
2, 3, 3, + 2, 2, 2, 3, 3, 2, 3, 2, + 3, 3, 2, 2, 2, 3, 3, 2, + 3, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 3, 2, 3, 3, 2, + 77, 78, 63, 2, 3, 2, 3, 3, + 2, 79, 80, 81, 82, 83, 84, 85, + 2, 86, 87, 88, 89, 90, 2, 3, + 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 2, 3, 2, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 104, 108, + 109, 110, 111, 112, 2, 3, 3, 2, + 2, 3, 2, 2, 3, 3, 3, 2, + 3, 2, 3, 3, 2, 2, 2, 3, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 3, 2, 2, + 3, 3, 3, 2, 2, 2, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 2, + 3, 3, 2, 113, 114, 115, 116, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 117, 2, 3, 2, 118, 119, 120, 121, + 122, 123, 2, 3, 3, 3, 2, 2, + 2, 2, 3, 3, 2, 3, 3, 2, + 2, 2, 3, 3, 3, 3, 2, 124, + 125, 126, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 127, 128, 129, + 2, 130, 2, 2, 130, 2, 130, 130, + 2, 130, 130, 2, 130, 130, 130, 2, + 130, 2, 130, 130, 2, 130, 130, 130, + 130, 2, 130, 130, 2, 2, 130, 130, + 2, 130, 2, 131, 132, 133, 134, 135, + 136, 137, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 22, 151, + 152, 153, 154, 155, 156, 157, 158, 159, + 138, 2, 130, 130, 130, 130, 2, 130, + 2, 130, 130, 2, 3, 3, 2, 2, + 3, 130, 130, 2, 130, 130, 2, 130, + 2, 3, 130, 130, 130, 3, 3, 2, + 130, 130, 130, 2, 2, 2, 130, 2, + 3, 3, 130, 130, 3, 2, 130, 130, + 130, 2, 130, 2, 130, 2, 130, 2, + 3, 2, 2, 130, 130, 2, 130, 2, + 3, 130, 130, 3, 130, 2, 3, 130, + 130, 3, 3, 130, 130, 2, 130, 130, + 3, 2, 130, 130, 130, 3, 3, 3, + 2, 130, 3, 130, 2, 2, 2, 3, + 2, 2, 2, 130, 130, 130, 3, 130, + 3, 2, 130, 130, 3, 3, 3, 130, + 130, 130, 2, 130, 130, 3, 3, 2, + 2, 2, 130, 130, 130, 2, 130, 2, + 3, 130, 130, 130, 130, 3, 130, 3, + 3, 2, 130, 3, 130, 2, 130, 2, + 130, 3, 130, 130, 2, 130, 2, 130, + 130, 130, 130, 3, 2, 3, 130, 2, + 130, 130, 130, 130, 2, 130, 2, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 2, 3, 130, 130, + 3, 130, 2, 3, 130, 130, 130, 2, + 130, 3, 130, 130, 130, 2, 130, 2, + 130, 130, 2, 130, 130, 2, 3, 130, + 3, 2, 130, 130, 130, 2, 3, 130, + 2, 130, 130, 2, 130, 130, 3, 130, + 3, 3, 130, 2, 130, 130, 3, 2, + 130, 130, 130, 130, 3, 130, 130, 3, + 130, 2, 130, 2, 3, 3, 3, 130, + 130, 3, 2, 130, 2, 130, 2, 3, + 3, 3, 3, 130, 130, 3, 130, 2, + 3, 130, 130, 3, 130, 3, 2, 3, + 130, 3, 130, 2, 3, 130, 130, 130, + 130, 3, 130, 2, 130, 130, 2, 181, + 182, 183, 184, 185, 2, 130, 58, 2, + 130, 2, 130, 2, 130, 2, 130, 2, + 186, 187, 2, 130, 2, 130, 2, 188, + 189, 190, 191, 66, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 2, 130, + 130, 2, 130, 2, 130, 2, 130, 130, + 130, 3, 3, 130, 2, 130, 2, 130, + 2, 3, 130, 2, 130, 3, 2, 3, + 130, 130, 130, 3, 130, 3, 2, 130, + 2, 3, 130, 3, 130, 3, 130, 2, + 130, 130, 3, 130, 2, 130, 130, 130, + 130, 2, 130, 3, 3, 130, 130, 3, + 2, 130, 130, 3, 130, 3, 2, 202, + 203, 189, 2, 130, 2, 130, 130, 2, + 204, 205, 206, 207, 208, 209, 210, 2, + 211, 212, 213, 214, 215, 2, 130, 2, + 130, 2, 130, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 2, 130, 3, 130, 2, + 2, 130, 3, 2, 3, 3, 2, 130, + 3, 130, 130, 2, 130, 2, 3, 130, + 3, 130, 3, 2, 2, 130, 2, 3, + 130, 130, 3, 130, 3, 130, 2, 130, + 3, 130, 2, 130, 130, 3, 130, 3, + 2, 130, 130, 3, 3, 3, 3, 130, + 130, 2, 3, 130, 2, 3, 3, 130, + 2, 130, 3, 130, 3, 130, 3, 130, + 2, 3, 2, 130, 130, 3, 3, 130, + 3, 130, 2, 2, 2, 130, 130, 3, + 130, 3, 130, 2, 2, 130, 3, 3, + 130, 3, 130, 2, 3, 130, 3, 130, + 2, 
3, 3, 130, 130, 2, 3, 3, + 3, 130, 130, 2, 239, 240, 115, 241, + 2, 130, 2, 130, 2, 130, 2, 242, + 2, 130, 2, 243, 244, 245, 246, 247, + 248, 2, 3, 3, 130, 130, 130, 2, + 2, 2, 2, 130, 130, 2, 130, 130, + 2, 2, 2, 130, 130, 130, 130, 2, + 249, 250, 251, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 130, 2, 252, 2, + 3, 2, 253, 2, 254, 255, 256, 258, + 257, 2, 130, 2, 2, 130, 130, 3, + 2, 3, 2, 259, 2, 260, 261, 262, + 264, 263, 2, 3, 2, 2, 3, 3, + 79, 80, 81, 82, 83, 84, 2, 3, + 1, 265, 265, 3, 1, 265, 266, 3, + 1, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 267, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 267, 267, 268, 268, 267, 268, + 267, 269, 270, 271, 272, 273, 275, 276, + 277, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 274, 278, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 267, 267, 268, 267, 267, 267, 268, 268, + 268, 268, 267, 267, 267, 267, 267, 267, + 268, 267, 267, 267, 267, 267, 267, 268, + 267, 267, 267, 267, 268, 268, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 268, 267, 268, + 268, 267, 267, 267, 267, 267, 267, 268, + 268, 268, 268, 268, 268, 267, 268, 268, + 267, 267, 267, 267, 267, 267, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 267, 268, + 267, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 315, 316, 317, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 268, 268, 267, 268, 267, 267, 267, 268, + 268, 267, 268, 268, 267, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 267, 267, 267, 268, + 268, 268, 267, 268, 267, 268, 267, 268, + 268, 268, 268, 268, 267, 268, 268, 267, + 318, 319, 320, 321, 322, 267, 268, 323, + 267, 318, 319, 324, 320, 321, 322, 267, + 268, 267, 268, 267, 268, 267, 268, 267, + 268, 267, 325, 326, 267, 268, 267, 268, + 267, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 267, 268, 268, 267, 268, 267, 268, 267, + 268, 268, 268, 268, 267, 268, 268, 267, + 267, 267, 268, 268, 267, 268, 267, 268, + 268, 267, 267, 267, 268, 268, 267, 268, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 268, 267, 268, 268, 267, 342, + 343, 328, 267, 268, 267, 268, 268, 267, + 344, 345, 346, 347, 348, 349, 350, 267, + 351, 352, 353, 354, 355, 267, 268, 267, + 268, 267, 268, 267, 268, 268, 268, 268, + 268, 267, 268, 267, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 369, 373, 374, + 375, 376, 377, 267, 268, 268, 267, 267, + 268, 267, 267, 268, 268, 268, 267, 268, + 267, 268, 268, 267, 267, 267, 268, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 268, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 268, 267, 267, 268, + 268, 268, 267, 267, 267, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 378, 379, 380, 381, 267, 268, + 267, 268, 267, 268, 267, 268, 267, 382, + 267, 268, 267, 383, 384, 385, 386, 387, + 388, 267, 268, 268, 268, 267, 267, 267, + 267, 268, 268, 267, 268, 268, 267, 267, + 267, 268, 268, 268, 268, 267, 389, 390, + 391, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 392, 393, 394, 267, + 395, 267, 395, 267, 267, 395, 395, 267, + 395, 395, 267, 395, 395, 395, 267, 395, + 267, 395, 395, 267, 395, 395, 395, 395, + 267, 395, 395, 267, 267, 395, 395, 267, + 395, 267, 396, 397, 
398, 399, 400, 401, + 402, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 287, 416, 417, + 418, 419, 420, 421, 422, 423, 424, 403, + 267, 395, 395, 395, 395, 267, 395, 267, + 395, 395, 267, 268, 268, 267, 267, 268, + 395, 395, 267, 395, 395, 267, 395, 267, + 268, 395, 395, 395, 268, 268, 267, 395, + 395, 395, 267, 267, 267, 395, 267, 268, + 268, 395, 395, 268, 267, 395, 395, 395, + 267, 395, 267, 395, 267, 395, 267, 268, + 267, 267, 395, 395, 267, 395, 267, 268, + 395, 395, 268, 395, 267, 268, 395, 395, + 268, 268, 395, 395, 267, 395, 395, 268, + 267, 395, 395, 395, 268, 268, 268, 267, + 395, 268, 395, 267, 267, 267, 268, 267, + 267, 267, 395, 395, 395, 268, 395, 268, + 267, 395, 395, 268, 268, 268, 395, 395, + 395, 267, 395, 395, 268, 268, 267, 267, + 267, 395, 395, 395, 267, 395, 267, 268, + 395, 395, 395, 395, 268, 395, 268, 268, + 267, 395, 268, 395, 267, 395, 267, 395, + 268, 395, 395, 267, 395, 267, 395, 395, + 395, 395, 268, 267, 268, 395, 267, 395, + 395, 395, 395, 267, 395, 267, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 267, 268, 395, 395, 268, + 395, 267, 268, 395, 395, 395, 267, 395, + 268, 395, 395, 395, 267, 395, 267, 395, + 395, 267, 395, 395, 267, 268, 395, 268, + 267, 395, 395, 395, 267, 268, 395, 267, + 395, 395, 267, 395, 395, 268, 395, 268, + 268, 395, 267, 395, 395, 268, 267, 395, + 395, 395, 395, 268, 395, 395, 268, 395, + 267, 395, 267, 268, 268, 268, 395, 395, + 268, 267, 395, 267, 395, 267, 268, 268, + 268, 268, 395, 395, 268, 395, 267, 268, + 395, 395, 268, 395, 268, 267, 268, 395, + 268, 395, 267, 268, 395, 395, 395, 395, + 268, 395, 267, 395, 395, 267, 446, 447, + 448, 449, 450, 267, 395, 323, 267, 395, + 267, 395, 267, 395, 267, 395, 267, 451, + 452, 267, 395, 267, 395, 267, 453, 454, + 455, 456, 331, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 267, 395, 395, + 267, 395, 267, 395, 267, 395, 395, 395, + 268, 268, 395, 267, 395, 267, 395, 267, + 268, 395, 267, 395, 268, 267, 268, 395, + 395, 395, 268, 395, 268, 267, 395, 267, + 268, 395, 268, 395, 268, 395, 267, 395, + 395, 268, 395, 267, 395, 395, 395, 395, + 267, 395, 268, 268, 395, 395, 268, 267, + 395, 395, 268, 395, 268, 267, 467, 468, + 454, 267, 395, 267, 395, 395, 267, 469, + 470, 471, 472, 473, 474, 475, 267, 476, + 477, 478, 479, 480, 267, 395, 267, 395, + 267, 395, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 267, 395, 268, 395, 267, 267, + 395, 268, 267, 268, 268, 267, 395, 268, + 395, 395, 267, 395, 267, 268, 395, 268, + 395, 268, 267, 267, 395, 267, 268, 395, + 395, 268, 395, 268, 395, 267, 395, 268, + 395, 267, 395, 395, 268, 395, 268, 267, + 395, 395, 268, 268, 268, 268, 395, 395, + 267, 268, 395, 267, 268, 268, 395, 267, + 395, 268, 395, 268, 395, 268, 395, 267, + 268, 267, 395, 395, 268, 268, 395, 268, + 395, 267, 267, 267, 395, 395, 268, 395, + 268, 395, 267, 267, 395, 268, 268, 395, + 268, 395, 267, 268, 395, 268, 395, 267, + 268, 268, 395, 395, 267, 268, 268, 268, + 395, 395, 267, 504, 505, 380, 506, 267, + 395, 267, 395, 267, 395, 267, 507, 267, + 395, 267, 508, 509, 510, 511, 512, 513, + 267, 268, 268, 395, 395, 395, 267, 267, + 267, 267, 395, 395, 267, 395, 395, 267, + 267, 267, 395, 395, 395, 395, 267, 514, + 515, 516, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 395, 267, 517, 267, 268, + 267, 518, 267, 519, 520, 521, 523, 522, + 267, 395, 267, 267, 395, 395, 268, 267, + 268, 
267, 524, 267, 525, 526, 527, 529, + 528, 267, 268, 267, 267, 268, 268, 344, + 345, 346, 347, 348, 349, 267, 268, 267, + 268, 268, 267, 266, 268, 268, 267, 266, + 268, 267, 266, 268, 267, 531, 532, 530, + 267, 266, 268, 267, 266, 268, 267, 533, + 534, 535, 536, 537, 530, 267, 538, 267, + 297, 298, 299, 533, 534, 539, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 267, 540, 538, 297, 298, 299, 541, 535, + 536, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 267, 540, 267, 542, 540, + 297, 298, 299, 543, 536, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 267, + 542, 267, 267, 542, 544, 267, 542, 267, + 545, 546, 267, 540, 267, 267, 542, 267, + 540, 267, 540, 327, 328, 329, 330, 331, + 332, 333, 547, 335, 336, 337, 338, 339, + 340, 341, 549, 550, 551, 552, 553, 554, + 549, 550, 551, 552, 553, 554, 549, 548, + 555, 267, 268, 538, 267, 556, 556, 556, + 542, 267, 297, 298, 299, 541, 539, 300, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 267, 545, 557, 267, 267, 540, 556, + 556, 542, 556, 556, 542, 556, 556, 556, + 542, 556, 556, 542, 556, 556, 542, 556, + 556, 267, 542, 542, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 
348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 
611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 
612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, + 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 
901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, + 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 
1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 
1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 
1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 
1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 
1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 
1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 
1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 
210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 
974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 
1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + 
+var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 
268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 
901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 
"grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl new file mode 100644 index 000000000..003ffbf59 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl @@ -0,0 +1,132 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) + + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; + ZWJSeq = ZWJGlue Extension; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
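+	// Editorial note: utf8.DecodeRune reports a length of 1 for an invalid
+	// encoding, and data is known to be non-empty at this point, so the
+	// scanner always advances by at least one byte and cannot stall here.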
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl new file mode 100644 index 000000000..fb4511825 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1583 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xED 0xA0 0x80..0xFF #Cs [2048] .... + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. 
+# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. 
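+# Worked example (editorial, not in the original script): 0x700..0x900
+# straddles the two-byte/three-byte boundary at 0x7ff, so utf8_ranges
+# returns [0x700..0x7ff, 0x800..0x900]; each piece then encodes with a
+# uniform UTF-8 byte length.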
+ +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. + +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? 
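+      # Editorial note: the assignment above blanks desc after the first
+      # chunk, so only the first emitted line of a multi-chunk range
+      # carries the property description.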
+ code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts <= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration, +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a newentry or update +// an existing entry. Returns if updated. 
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaution + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.updateEdge(search[0], child) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +// DeletePrefix is used to delete the subtree under a prefix +// Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s string) int { + return t.deletePrefix(nil, t.root, s) +} + +// delete does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix string) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + //recursively walk from all edges of the node to be deleted + recursiveWalk(n, func(s string, v interface{}) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = nil + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child 
== nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaution + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. +func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaution + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaution + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. 
+func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaution + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 000000000..99849c0e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. 
+// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 000000000..9cf7eaf40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. 
+// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. 
An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 000000000..1a3d106d5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 000000000..142a7a01c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 000000000..285e54d67 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + rvals := rValuesAtPath(i, path, true, false, v == nil) + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 000000000..710eb432f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+			prettify(val, indent+2, buf)
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..c022407f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config        *aws.Config
+	Handlers      request.Handlers
+	Endpoint      string
+	SigningRegion string
+	SigningName   string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signin name can be overridden based on metadata the
+	// service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = DefaultRetryerMaxNumRetries + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 000000000..0fda42510 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,177 @@ +package client + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, you can implement the +// request.Retryer interface. +// +type DefaultRetryer struct { + // Num max Retries is the number of max retries that will be performed. + // By default, this is zero. + NumMaxRetries int + + // MinRetryDelay is the minimum retry delay after which retry will be performed. + // If not set, the value is 0ns. + MinRetryDelay time.Duration + + // MinThrottleRetryDelay is the minimum retry delay when throttled. + // If not set, the value is 0ns. + MinThrottleDelay time.Duration + + // MaxRetryDelay is the maximum retry delay before which retry must be performed. + // If not set, the value is 0ns. + MaxRetryDelay time.Duration + + // MaxThrottleDelay is the maximum retry delay when throttled. + // If not set, the value is 0ns. + MaxThrottleDelay time.Duration +} + +const ( + // DefaultRetryerMaxNumRetries sets maximum number of retries + DefaultRetryerMaxNumRetries = 3 + + // DefaultRetryerMinRetryDelay sets minimum retry delay + DefaultRetryerMinRetryDelay = 30 * time.Millisecond + + // DefaultRetryerMinThrottleDelay sets minimum delay when throttled + DefaultRetryerMinThrottleDelay = 500 * time.Millisecond + + // DefaultRetryerMaxRetryDelay sets maximum retry delay + DefaultRetryerMaxRetryDelay = 300 * time.Second + + // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled + DefaultRetryerMaxThrottleDelay = 300 * time.Second +) + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. 
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.
+	if d.NumMaxRetries == 0 {
+		return false
+	}
+
+	// If one of the other handlers already set the retry state
+	// we don't want to override it based on the service's state
+	if r.Retryable != nil {
+		return *r.Retryable
+	}
+	return r.IsErrorRetryable() || r.IsErrorThrottle()
+}
+
+// This will look in the Retry-After header, RFC 7231, for how long
+// it will wait before attempting another request
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
+	if !canUseRetryAfterHeader(r) {
+		return 0, false
+	}
+
+	delayStr := r.HTTPResponse.Header.Get("Retry-After")
+	if len(delayStr) == 0 {
+		return 0, false
+	}
+
+	delay, err := strconv.Atoi(delayStr)
+	if err != nil {
+		return 0, false
+	}
+
+	return time.Duration(delay) * time.Second, true
+}
+
+// Will look at the status code to see if the retry header pertains to
+// the status code.
+func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 000000000..8958c32d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,194 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. + if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 000000000..920e9fddf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,13 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. 
+type ClientInfo struct {
+	ServiceName   string
+	ServiceID     string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 000000000..881d575f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will use to make
+// an individual API request; for NoOpRetryer, MaxRetries will always be zero.
+func (d NoOpRetryer) MaxRetries() int {
+	return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+	return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 000000000..fd1e240f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,536 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//    // Create Session with MaxRetries configuration to be shared by multiple
+//    // service clients.
+//    sess := session.Must(session.NewSession(&aws.Config{
+//        MaxRetries: aws.Int(3),
+//    }))
+//
+//    // Create S3 service client with a specific Region.
+//    svc := s3.New(sess, &aws.Config{
+//        Region: aws.String("us-west-2"),
+//    })
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// Note: You must still provide a `Region` value when specifying an
+	// endpoint for a client.
+	Endpoint *string
+
+	// The resolver to use for looking up endpoints for AWS service clients
+	// to use based on region.
+ EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. 
See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests that are not compatible will fall back to normal S3
+	// requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an S3
+	// client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must be DNS
+	// compatible to also work with accelerate.
+	S3UseAccelerate *bool
+
+	// The S3DisableContentMD5Validation config option is temporarily disabled
+	// for S3 GetObject API calls. See #1837.
+	//
+	// Set this to `true` to disable the S3 service client from automatically
+	// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+	// will also disable the SDK from performing object ContentMD5 validation
+	// on GetObject API calls.
+	S3DisableContentMD5Validation *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.NewSession() in order to disable
+	// the EC2Metadata overriding the timeout for default credentials chain.
+	//
+	// Example:
+	//    sess := session.Must(session.NewSession(aws.NewConfig()
+	//        .WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//    svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint to be generated for a service client to
+	// be the dual stack endpoint. The dual stack endpoint will support
+	// both IPv4 and IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+	//
+	//    sess := session.Must(session.NewSession())
+	//
+	//    svc := s3.New(sess, &aws.Config{
+	//        UseDualStack: aws.Bool(true),
+	//    })
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling retry
+	// delay of an API operation. It is recommended to not use SleepDelay at all
+	// and specify a Retryer instead.
+	SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+	// Will default to false. This would only be used for empty directory names in s3 requests.
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. 
+func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
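+//
+// Illustrative usage sketch (the region and retry count are arbitrary example
+// values, not SDK defaults):
+//
+//    base := aws.NewConfig().WithRegion("us-west-2")
+//
+//    // After MergeIn, base carries both Region and MaxRetries. When the same
+//    // field is set in more than one config, the later config wins.
+//    base.MergeIn(aws.NewConfig().WithMaxRetries(5))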
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 000000000..2866f9a7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. 
Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 000000000..3718b26e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 000000000..66c5945db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,56 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. 
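+//
+// Illustrative usage sketch (SleepWithContext is the helper defined in this
+// package's context_sleep.go; the one second delay is an arbitrary example):
+//
+//    ctx := aws.BackgroundContext()
+//    // Always sleeps the full duration and returns nil, since the background
+//    // context is never canceled and has no deadline.
+//    _ = aws.SleepWithContext(ctx, 1*time.Second)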
+func BackgroundContext() Context { + return backgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 000000000..9c29f29af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 000000000..304fd1561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 000000000..4e076c183 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. 
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pointer to the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+	dst := make([]*int8, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+	dst := make([]int8, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+	dst := make(map[string]*int8)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+	dst := make(map[string]int8)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+	return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. +func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. +func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. 
+func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
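+//
+// Illustrative usage sketch (the date below is an arbitrary example value):
+//
+//    // 2018-01-01T00:00:00Z -> 1514764800000 milliseconds since the Unix epoch.
+//    ms := aws.TimeUnixMilli(time.Date(2018, time.January, 1, 0, 0, 0, 0, time.UTC))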
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 000000000..0c60e612e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,230 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. 
+var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New("RequestError", "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. 
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 000000000..7d50b1557 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 000000000..ab69c7a6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. 
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 000000000..3ad1e798d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true. + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. 
+// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 000000000..4af592158 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,299 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := credentials.NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := credentials.NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "fmt" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. 
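The ChainProvider above tries each provider in order and caches the first one that succeeds. A minimal sketch (not part of the vendored code; the profile name is illustrative) chaining the environment provider with the shared-credentials-file provider and enabling verbose chain errors:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewCredentials(&credentials.ChainProvider{
            VerboseErrors: true,
            Providers: []credentials.Provider{
                &credentials.EnvProvider{},
                &credentials.SharedCredentialsProvider{Profile: "no_token"},
            },
        })

        if v, err := creds.Get(); err != nil {
            // With VerboseErrors set, the batch error lists each provider's failure.
            fmt.Println("no provider in the chain returned credentials:", err)
        } else {
            fmt.Println("credentials sourced from:", v.ProviderName)
        }
    }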
+// +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) +// // Access public S3 buckets. +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// HasKeys returns if the credentials Value has both AccessKeyID and +// SecretAccessKey value set. +func (v Value) HasKeys() bool { + return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0 +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. +type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error +// this is used by the SDK when construction a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Retrieved returned Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. 
+func (e *Expiry) IsExpired() bool { + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now + } + return e.expiration.Before(curTime()) +} + +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + + m sync.RWMutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired need to retrieve the credentials taking the full + // lock. + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.RLock() + defer c.m.RUnlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
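The Provider interface and the embeddable Expiry helper above are all that is needed to plug a custom credential source into Credentials. A minimal sketch (not part of the vendored code; the provider and its key values are hypothetical) of a provider that embeds Expiry and asks to be refreshed five minutes before its hard expiry:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // rotatingProvider is a hypothetical provider; the embedded Expiry
    // supplies IsExpired and ExpiresAt.
    type rotatingProvider struct {
        credentials.Expiry
    }

    func (p *rotatingProvider) Retrieve() (credentials.Value, error) {
        // Placeholder values standing in for keys fetched from some
        // internal secrets service.
        v := credentials.Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            ProviderName:    "RotatingProvider",
        }

        // Expire 5 minutes early so callers refresh before the hard deadline.
        p.SetExpiration(time.Now().Add(1*time.Hour), 5*time.Minute)
        return v, nil
    }

    func main() {
        creds := credentials.NewCredentials(&rotatingProvider{})
        v, _ := creds.Get()
        fmt.Println(v.ProviderName, "expired:", creds.IsExpired())
    }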
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 000000000..43d4ed386 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,180 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "iam/security-credentials/" + +// requestCredList requests a list of credentials from the EC2 service. +// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New(request.ErrCodeSerialization, + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..1a7af53a4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,203 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. 
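The EC2 role provider above can also be configured through the option functions its constructors accept. A minimal sketch (not part of the vendored code; the HTTP timeout and expiry window values are illustrative) wiring an EC2Metadata client with a short timeout and an early-refresh window:

    package main

    import (
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Refresh instance-role credentials one minute before they expire
        // and keep metadata-service calls on a short leash.
        creds := ec2rolecreds.NewCredentialsWithClient(
            ec2metadata.New(sess, &aws.Config{
                HTTPClient: &http.Client{Timeout: 5 * time.Second},
            }),
            func(p *ec2rolecreds.EC2RoleProvider) {
                p.ExpiryWindow = time.Minute
            },
        )

        svc := s3.New(sess, &aws.Config{Credentials: creds})
        _ = svc
    }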
+// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. 
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 000000000..54c5cf733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,74 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. 
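The endpoint credentials Provider above needs an aws.Config and request.Handlers to build its internal client; the SDK's defaults package can supply both. A minimal sketch (not part of the vendored code; the endpoint URL, token, and window are placeholders) assuming the endpoint speaks the JSON format described in the package comment:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        creds := endpointcreds.NewCredentialsClient(
            *defaults.Config(),
            defaults.Handlers(),
            "http://127.0.0.1:8080/creds", // placeholder endpoint
            func(p *endpointcreds.Provider) {
                p.ExpiryWindow = 30 * time.Second
                p.AuthorizationToken = "example-token" // placeholder token
            },
        )

        if _, err := creds.Get(); err != nil {
            fmt.Println("endpoint credentials failed:", err)
        }
    }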
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. +// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 000000000..7fc91d9d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 000000000..1980c8c14 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. 
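The environment provider above reads only the process environment and never expires. A minimal sketch (not part of the vendored code; the key values are placeholders) retrieving the value and checking it with HasKeys:

    package main

    import (
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Placeholder values for illustration only.
        os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

        creds := credentials.NewEnvCredentials()
        v, err := creds.Get()
        if err != nil {
            fmt.Println("env credentials missing:", err)
            return
        }
        fmt.Println("has keys:", v.HasKeys(), "provider:", v.ProviderName)
    }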
+ + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentialsTimeout( + "/path/to/command", + time.Duration(500) * time.Millisecond) + +If you need more control, you can set any configurable options in the +credentials using one or more option functions. For example, you can set a two +minute timeout, a credential duration of 60 minutes, and a maximum stdout +buffer size of 2k. + + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. 
+ ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = 1024 + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. 
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. +func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. 
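ProcessProvider.Retrieve above requires the external command to print a single JSON document with Version set to 1, AccessKeyId and SecretAccessKey present, and optionally a SessionToken and an RFC 3339 Expiration (omitting Expiration makes the credentials static). A minimal sketch (not part of the vendored code; all values are placeholders) of a helper program that a credential_process entry or processcreds.NewCredentials could point at:

    package main

    import (
        "encoding/json"
        "os"
        "time"
    )

    func main() {
        // Emit the shape ProcessProvider.Retrieve expects; values are
        // placeholders standing in for real credentials.
        exp := time.Now().Add(30 * time.Minute).UTC()
        out := map[string]interface{}{
            "Version":         1,
            "AccessKeyId":     "AKID",
            "SecretAccessKey": "SECRET",
            "SessionToken":    "TOKEN",
            "Expiration":      exp.Format(time.RFC3339),
        }
        json.NewEncoder(os.Stdout).Encode(out)
    }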
+func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. +func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 000000000..e15514958 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,150 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. + Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. 
+func loadProfile(filename, profile string) (Value, error) { + config, err := ini.OpenFile(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) + } + + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + nil) + } + + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.String("aws_session_token") + + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil + } + + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of home directly not found error being returned. + // This error is too verbose, failure when opening the file would of been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". +func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 000000000..531139e39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,55 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. 
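loadProfile above resolves the credentials file and profile before reading aws_access_key_id and aws_secret_access_key. A minimal sketch (not part of the vendored code; the path and profile are illustrative) loading a specific profile explicitly; passing empty strings instead falls back to AWS_SHARED_CREDENTIALS_FILE / $HOME/.aws/credentials and AWS_PROFILE:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewSharedCredentials("/tmp/example.ini", "no_token")

        v, err := creds.Get()
        if err != nil {
            fmt.Println("failed to load profile:", err)
            return
        }
        fmt.Println("loaded credentials via", v.ProviderName)
    }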
+func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..2e528d130 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,312 @@ +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with a MFA token you can either specify a MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will be not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. 
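Static credentials above never expire and are mostly useful for tests or keys injected from configuration. A minimal sketch (not part of the vendored code; key values and region are placeholders):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Placeholder keys; an empty token is fine for non-temporary creds.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        sess := session.Must(session.NewSession(&aws.Config{
            Region:      aws.String("us-east-1"),
            Credentials: creds,
        }))

        svc := s3.New(sess)
        _ = svc
    }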
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfing the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkrand" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stderr and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function go read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple gorouties. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. 
+// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // MaxJitterFrac reduces the effective Duration of each credential requested + // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must + // have a value between 0 and 1. Any other value may lead to expected behavior. + // With a MaxJitterFrac value of 0, default) will no jitter will be used. + // + // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the + // AssumeRole call will be made with an arbitrary Duration between 27m and + // 30m. 
+ // + // MaxJitterFrac should not be negative. + MaxJitterFrac float64 +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go new file mode 100644 index 000000000..b20b63394 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,100 @@ +package stscreds + +import ( + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/aws/aws-sdk-go/service/sts/stsiface" +) + +const ( + // ErrCodeWebIdentity will be used as an error code when constructing + // a new error to be returned during session creation or retrieval. + ErrCodeWebIdentity = "WebIdentityErr" + + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// now is used to return a time.Time object representing +// the current time. This can be used to easily test and +// compare test values. +var now = time.Now + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + credentials.Expiry + + client stsiface.STSAPI + ExpiryWindow time.Duration + + tokenFilePath string + roleARN string + roleSessionName string +} + +// NewWebIdentityCredentials will return a new set of credentials with a given +// configuration, role arn, and token file path. +func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { + svc := sts.New(c) + p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) + return credentials.NewCredentials(p) +} + +// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI +func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return &WebIdentityRoleProvider{ + client: svc, + tokenFilePath: path, + roleARN: roleARN, + roleSessionName: roleSessionName, + } +} + +// Retrieve will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. +func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { + b, err := ioutil.ReadFile(p.tokenFilePath) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + + sessionName := p.roleSessionName + if len(sessionName) == 0 { + // session name is used to uniquely identify a session. This simply + // uses unix time in nanoseconds to uniquely identify sessions. + sessionName = strconv.FormatInt(now().UnixNano(), 10) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + RoleArn: &p.roleARN, + RoleSessionName: &sessionName, + WebIdentityToken: aws.String(string(b)), + }) + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. 
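As a companion sketch for the web identity provider defined in this file: it assumes an existing session sess, a placeholder role ARN, and a token file path that is purely illustrative.

    // Exchange the OIDC token on disk for temporary AWS credentials.
    creds := stscreds.NewWebIdentityCredentials(sess,
        "arn:aws:iam::123456789012:role/example-role",
        "example-session",
        "/var/run/secrets/tokens/oidc-token")

    cfg := aws.Config{Credentials: creds}
    _ = cfg // pass to session.NewSession or directly to a service client constructor
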
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) + } + + p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow) + + value := credentials.Value{ + AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey), + SessionToken: aws.StringValue(resp.Credentials.SessionToken), + ProviderName: WebIdentityProviderName, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 000000000..25a66d1dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. The Start +// function will enable the CSM client to publish metrics to the CSM agent. It +// is safe to call Start concurrently, but if Start is called additional times +// with different ClientID or address it will panic. +// +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// When controlling the CSM client manually, you must also inject its request +// handlers into the SDK's Session configuration for the SDK's API clients to +// publish metrics. +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// // Add CSM client's metric publishing request handlers to the SDK's +// // Session Configuration. +// r.InjectHandlers(&sess.Handlers) +// +// Controlling CSM client +// +// Once the CSM client has been enabled the Get function will return a Reporter +// value that you can use to pause and resume the metrics published to the CSM +// agent. If Get function is called before the reporter is enabled with the +// Start function or via SDK's Session configuration nil will be returned. +// +// The Pause method can be called to stop the CSM client publishing metrics to +// the CSM agent. The Continue method will resume metric publishing. +// +// // Get the CSM client Reporter. 
+// r := csm.Get() +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 000000000..4b19e2800 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,89 @@ +package csm + +import ( + "fmt" + "strings" + "sync" +) + +var ( + lock sync.Mutex +) + +const ( + // DefaultPort is used when no port is specified. + DefaultPort = "31000" + + // DefaultHost is the host that will be used when none is specified. + DefaultHost = "127.0.0.1" +) + +// AddressWithDefaults returns a CSM address built from the host and port +// values. If the host or port is not set, default values will be used +// instead. If host is "localhost" it will be replaced with "127.0.0.1". +func AddressWithDefaults(host, port string) string { + if len(host) == 0 || strings.EqualFold(host, "localhost") { + host = DefaultHost + } + + if len(port) == 0 { + port = DefaultPort + } + + // Only IP6 host can contain a colon + if strings.Contains(host, ":") { + return "[" + host + "]:" + port + } + + return host + ":" + port +} + +// Start will start a long running go routine to capture +// client side metrics. Calling start multiple time will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// r, err := csm.Start("clientID", "127.0.0.1:31000") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. 
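A quick illustration of how AddressWithDefaults falls back to the defaults above; the expected results follow directly from the function body.

    csm.AddressWithDefaults("", "")          // "127.0.0.1:31000"
    csm.AddressWithDefaults("localhost", "") // "127.0.0.1:31000"
    csm.AddressWithDefaults("::1", "4000")   // "[::1]:4000", IPv6 hosts are bracketed
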
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 000000000..5bacc791a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + 
m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 000000000..82a3e345e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,55 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused *int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + paused: new(int64), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 000000000..54a99280c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 000000000..c7008d8c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,265 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. 
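Because metricChan is unexported, the following sketch would only compile inside package csm; it simply illustrates the non-blocking, drop-when-paused-or-full semantics of Push shown above.

    ch := newMetricChan(1)
    ok := ch.Push(metric{}) // true: metric buffered
    ok = ch.Push(metric{})  // false: channel full, metric dropped rather than blocking
    ch.Pause()
    ok = ch.Push(metric{})  // false: a paused channel rejects new metrics
    _ = ok
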
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel preventing any new metrics from being +// added. It is safe to call concurrently with other calls to Pause, but if +// called concurently with Continue can lead to unexpected state. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring to be +// resumed. It is safe to call concurrently with other calls to Continue, but +// if called concurently with Pause can lead to unexpected state. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// InjectHandlers will will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers +// multiple times may lead to unexpected behavior, (e.g. duplicate metrics). +// +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + handlers.Complete.PushFrontNamed(request.NamedHandler{ + Name: APICallMetricHandlerName, + Fn: rep.sendAPICallMetric, + }) + + handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{ + Name: APICallAttemptMetricHandlerName, + Fn: rep.sendAPICallAttemptMetric, + }) +} + +// boolIntValue return 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 000000000..23bb639e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,207 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// A Defaults provides a collection of default values for SDK clients. 
+type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). + WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: CredProviders(cfg, handlers), + }) +} + +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// For applications that need to use some other provider (for example use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. This allows that default chaint to be +// automatically updated +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + +const ( + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. 
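A sketch of the use case described for CredProviders above: prepending an application-specific provider while keeping the SDK's default chain as the fallback. Here myCustomProvider is a placeholder type assumed to implement credentials.Provider.

    def := defaults.Get()

    providers := append([]credentials.Provider{myCustomProvider{}},
        defaults.CredProviders(def.Config, def.Handlers)...)

    // Wrap the combined chain so it can be shared across service clients.
    def.Config.Credentials = credentials.NewChainCredentials(providers)
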
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 000000000..ca0ee1dcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 000000000..4fcb61618 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operations parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer of the scalar, and dereferencing +// a pointer easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So get a +// *string from a string value use the "String" function. This makes it easy to +// to get pointer of a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice for commonly types used in API parameters. The map and slice +// conversion functions use similar naming pattern as the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if a HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client. +package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go new file mode 100644 index 000000000..d126764ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -0,0 +1,170 @@ +package ec2metadata + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" +) + +// GetMetadata uses the path provided to request information from the EC2 +// instance metdata service. The content will be returned as a string, or +// error if the request failed. 
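As a usage sketch for the accessors defined in this file, assuming an existing session sess and the fmt package; the calls simply return errors when the code is not running on an EC2 instance.

    meta := ec2metadata.New(sess)
    if meta.Available() {
        id, _ := meta.GetMetadata("instance-id")
        region, _ := meta.Region()
        fmt.Println(id, region)
    }
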
+func (c *EC2Metadata) GetMetadata(p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/meta-data", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + err := req.Send() + + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + err := req.Send() + + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. 
+// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 000000000..4c5636e35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,152 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +package ec2metadata + +import ( + "bytes" + "errors" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ServiceName is the name of the service. +const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// +// Example: +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { + c := p.ClientConfig(ServiceName, cfgs...) + return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// NewClient returns a new EC2Metadata client. Should be used to create +// a client when not using a session. Generally using just New with a session +// is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. 
+// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. +func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. + cfg.HTTPClient = &http.Client{ + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, + } + } + + svc := &EC2Metadata{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceName, + Endpoint: endpoint, + APIVersion: "latest", + }, + handlers, + ), + } + + svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + svc.Handlers.UnmarshalError.PushBack(unmarshalError) + svc.Handlers.Validate.Clear() + svc.Handlers.Validate.PushBack(validateEndpointHandler) + + // Disable the EC2 Metadata service if the environment variable is set. + // This shortcirctes the service's functionality to always fail to send + // requests. + if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + + // Add additional options to the service config + for _, option := range opts { + option(svc.Client) + } + + return svc +} + +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + +type metadataOutput struct { + Content string +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + b := &bytes.Buffer{} + if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) + return + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) +} + +func validateEndpointHandler(r *request.Request) { + if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 000000000..87b9ff3ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,188 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. 
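For context, a sketch of resolving an endpoint through this package's Resolver interface, the same pattern ec2RoleProvider uses above; the service and region identifiers are ordinary examples.

    resolver := endpoints.DefaultResolver()
    ep, err := resolver.EndpointFor("s3", "us-west-2")
    if err == nil {
        fmt.Println(ep.URL, ep.SigningRegion) // e.g. https://s3.us-west-2.amazonaws.com
    }
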
+type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services[svcName] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func 
custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + + s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 000000000..452cefda6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,5651 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. + AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. + AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). + MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// AWS ISO (US) partition's regions. +const ( + UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. +) + +// AWS ISOB (US) partition's regions. +const ( + UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). 
+// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, + awsisoPartition, + awsisobPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "me-south-1": endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appmesh": service{ + + Endpoints: endpoints{ + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "backup": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, 
+ "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": 
endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "connect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "docdb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + 
"us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, 
+ "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fsx": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "groundstation": service{ + + Endpoints: endpoints{ + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotevents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: 
"data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lakeformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + 
"us-west-2": endpoint{}, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: 
"s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + 
"us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "session.qldb": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": 
endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "sts.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{ + Hostname: "sts.me-south-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, 
+ }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: 
"169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: 
endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "api.ecr": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + 
Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + 
Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": 
service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": 
endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). +func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": 
endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + "us-iso-east-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + + Endpoints: endpoints{ + "aws-iso-global": endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). 
+func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ProdFips": endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + "us-isob-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: 
endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 000000000..ca8fc828e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. 
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. 
+ Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 000000000..84316b92c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. 
To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 000000000..9c936be6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,452 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unknown and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. 
Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. 
This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id, dnsSuffix string + p *partition +} + +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := map[string]Region{} + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := map[string]Service{} + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. 
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { + es := map[string]Endpoint{} + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. 
+type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 000000000..523ad79ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,308 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) 
+ } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go 
new file mode 100644 index 000000000..0fdfcc56e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,351 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + 
"RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 000000000..fa06f7a8f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 000000000..91a6f277a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. 
This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 000000000..6ed15b2ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,118 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nil, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. 
+type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 000000000..d9b37f4d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,18 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 000000000..185b07318 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,322 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalStream HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList +} + +// Copy returns a copy of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. 
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. 
+func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. 
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 000000000..79f79602b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 000000000..9370fa50c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. 
+func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 000000000..8e332cce6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,670 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. 
+// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + SanitizeHostForHeader(httpReq) + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// the were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always returns a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// a in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use use a single Request value for multiple +// requests. 
A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. +func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. 
An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. 
+ // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. 
port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 000000000..e36e468b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 000000000..de1292f45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,36 @@ +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. 
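+//
+// Illustrative sketch of direct Request use; cfg, info, handlers, retryer,
+// op, params, data, and payload are placeholders for values the caller
+// already has:
+//
+//	req := request.New(cfg, info, handlers, retryer, op, params, data)
+//	req.SetReaderBody(bytes.NewReader(payload)) // SetReaderBody calls ResetBody
+//	if err := req.Send(); err != nil {
+//		// handle the request failure
+//	}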
+func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 000000000..a7365cd1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 000000000..307fa0705 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 000000000..f093fc542 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,264 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. 
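+	// Enabling this stops pagination as soon as the next tokens equal the
+	// previous ones, guarding against APIs that repeat the final token
+	// instead of returning an empty one.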
+ EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. 
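+//
+// For example, for an operation whose Paginator declares
+// OutputTokens: []string{"NextMarker"} and TruncationToken: "IsTruncated"
+// (illustrative values), the next page tokens are the NextMarker values read
+// from r.Data, and nil is returned once IsTruncated reports false or every
+// output token is empty.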
+func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 000000000..e84084da5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,276 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. +type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. 
+var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry := shouldRetryError(origErr) + if err.Code() == "RequestError" && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. 
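+//
+// Illustrative sketch; err is the error returned by an API call and sess is
+// assumed to be a configured session whose cached credentials should be
+// refreshed before retrying:
+//
+//	if request.IsErrorExpiredCreds(err) {
+//		sess.Config.Credentials.Expire()
+//	}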
+func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 000000000..09a44eb98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. 
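+	// WithResponseReadTimeout removes any handler already registered under
+	// this name from the Send list before pushing its own, so applying the
+	// option more than once does not stack timeout wrappers.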
+ HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 000000000..8630683f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. 
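+//
+// Illustrative sketch of how a generated input type might report nested
+// validation failures; CreateThingInput, its fields, and the nested Config
+// type are placeholders:
+//
+//	func (s *CreateThingInput) Validate() error {
+//		invalidParams := request.ErrInvalidParams{Context: "CreateThingInput"}
+//		if s.Name == nil {
+//			invalidParams.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if s.Config != nil {
+//			if err := s.Config.Validate(); err != nil {
+//				invalidParams.AddNested("Config", err.(request.ErrInvalidParams))
+//			}
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}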
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. 
+func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 000000000..4601f883c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. 
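+//
+// Illustrative sketch; WaitUntilThingExistsWithContext stands in for any
+// generated WaitUntil*WithContext method:
+//
+//	err := svc.WaitUntilThingExistsWithContext(ctx, params,
+//		request.WithWaiterMaxAttempts(10),
+//		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
+//	)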
+func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. 
+const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
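+//
+// For reference, a service model typically produces acceptors of this shape;
+// the argument path and expected value below are illustrative only:
+//
+//	request.WaiterAcceptor{
+//		State:    request.SuccessWaiterState,
+//		Matcher:  request.PathAllWaiterMatch,
+//		Argument: "Reservations[].Instances[].State.Name",
+//		Expected: "running",
+//	}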
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 000000000..ea9ebb6f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 000000000..fec39dfc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
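+// Editor's note: this variant and the go1.6 and go1.7+ variants of
+// getCABundleTransport are selected by their build tags; the older variants
+// simply omit http.Transport fields (such as DialContext, MaxIdleConns,
+// IdleConnTimeout, and ExpectContinueTimeout) that were introduced in later
+// Go releases.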
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 000000000..1c5a5391e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 000000000..7713ccfca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,259 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided an Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_IAM_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_IAM_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
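+// Editor's illustrative note (not part of the upstream SDK): the web identity
+// branch of resolveCredentials above is driven purely by environment
+// variables, for example (token path and role ARN are hypothetical):
+//
+//	AWS_WEB_IDENTITY_TOKEN_FILE=/path/to/web-identity-token
+//	AWS_ROLE_ARN=arn:aws:iam::123456789012:role/my-role
+//	AWS_ROLE_SESSION_NAME=my-session   # optional
+//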
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) + +func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, + filepath string, + roleARN, sessionName string, +) (*credentials.Credentials, error) { + + if len(filepath) == 0 { + return nil, WebIdentityEmptyTokenFilePathErr + } + + if len(roleARN) == 0 { + return nil, WebIdentityEmptyRoleARNErr + } + + creds := stscreds.NewWebIdentityCredentials( + &Session{ + Config: cfg, + Handlers: handlers.Copy(), + }, + roleARN, + sessionName, + filepath, + ) + + return creds, nil +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch { + case sharedCfg.SourceProfile != nil: + // Assume IAM role with credentials source from a different profile. + creds, err = resolveCredsFromProfile(cfg, envCfg, + *sharedCfg.SourceProfile, handlers, sessOpts, + ) + + case sharedCfg.Creds.HasKeys(): + // Static Credentials from Shared Config/Credentials file. + creds = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + + case len(sharedCfg.CredentialProcess) != 0: + // Get credentials from CredentialProcess + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + + case len(sharedCfg.CredentialSource) != 0: + creds, err = resolveCredsFromSource(cfg, envCfg, + sharedCfg, handlers, sessOpts, + ) + + case len(sharedCfg.WebIdentityTokenFile) != 0: + // Credentials from Assume Web Identity token require an IAM Role, and + // that roll will be assumed. May be wrapped with another assume role + // via SourceProfile. + return assumeWebIdentity(cfg, handlers, + sharedCfg.WebIdentityTokenFile, + sharedCfg.RoleARN, + sharedCfg.RoleSessionName, + ) + + default: + // Fallback to default credentials provider, include mock errors for + // the credential chain so user can identify why credentials failed to + // be retrieved. 
+ creds = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + if err != nil { + return nil, err + } + + if len(sharedCfg.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = creds + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + + return creds, nil +} + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (creds *credentials.Credentials, err error) { + + switch sharedCfg.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + case credSourceEnvironment: + creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(*cfg, handlers) + creds = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return creds, nil +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + + if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a +// session when the MFAToken option is not set when shared config is configured +// load assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
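+// Editor's illustrative sketch (not part of the upstream SDK): a shared config
+// profile that reaches credsFromAssumeRole with MFA enabled, and the session
+// option that must accompany it (profile name, ARNs, and MFA serial are
+// hypothetical):
+//
+//	[profile with-mfa]
+//	role_arn       = arn:aws:iam::123456789012:role/my-role
+//	source_profile = default
+//	mfa_serial     = arn:aws:iam::123456789012:mfa/my-user
+//
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//		Profile:                 "with-mfa",
+//		SharedConfigState:       session.SharedConfigEnable,
+//		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+//	})
+//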
+func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 000000000..7ec66e7e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,245 @@ +/* +Package session provides configuration for the SDK's service clients. Sessions +can be shared across service clients that share the same base configuration. + +Sessions are safe to use concurrently as long as the Session is not being +modified. Sessions should be cached when possible, because creating a new +Session will load all configuration values from the environment, and config +files each time the Session is created. Sharing the Session value across all of +your service clients will ensure the configuration is loaded the fewest number +of times possible. + +Sessions options from Shared Config + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. Using the NewSessionWithOptions with +SharedConfigState set to SharedConfigEnable will create the session as if the +AWS_SDK_LOAD_CONFIG environment variable was set. + +Credential and config loading order + +The Session will attempt to load configuration and credentials from the +environment, configuration files, and other credential sources. The order +configuration is loaded in is: + + * Environment Variables + * Shared Credentials file + * Shared Configuration file (if SharedConfig is enabled) + * EC2 Instance Metadata (credentials only) + +The Environment variables for credentials will have precedence over shared +config even if SharedConfig is enabled. To override this behavior, and use +shared config credentials instead specify the session.Options.Profile, (e.g. +when using credential_source=Environment to assume a role). + + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "myProfile", + }) + +Creating Sessions + +Creating a Session without additional options will load credentials region, and +profile loaded from the environment and shared config automatically. See, +"Environment Variables" section for information on environment variables used +by Session. + + // Create Session + sess, err := session.NewSession() + + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded, config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{ + Region: aws.String("us-west-2"), + }) + +Use NewSessionWithOptions to provide additional configuration driving how the +Session's configuration will be loaded. Such as, specifying shared config +profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG). 
+ + // Equivalent to session.NewSession() + sess, err := session.NewSessionWithOptions(session.Options{ + // Options + }) + + sess, err := session.NewSessionWithOptions(session.Options{ + // Specify profile to load for the session's config + Profile: "profile_name", + + // Provide SDK Config options, such as Region. + Config: aws.Config{ + Region: aws.String("us-west-2"), + }, + + // Force enable Shared Config support + SharedConfigState: session.SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session to decorate API operation, (e.g. adding HTTP +headers). All clients that use the Session receive a copy of the Session's +handlers. For example, the following request handler added to the Session logs +every requests made. + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess := session.Must(session.NewSession()) + + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Printf("Request: %s/%s, Params: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Shared Config Fields + +By default the SDK will only load the shared credentials file's +(~/.aws/credentials) credentials values, and all other config is provided by +the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. + +If both config files are present the configuration from both files will be +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file +(~/.aws/config). + +Credentials are the values the SDK uses to authenticating requests with AWS +Services. When specified in a file, both aws_access_key_id and +aws_secret_access_key must be provided together in the same file to be +considered valid. They will be ignored if both are not present. +aws_session_token is an optional field that can be provided in addition to the +other two fields. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + + ; region only supported if SharedConfigEnabled. + region = us-east-1 + +Assume Role configuration + +The role_arn field allows you to configure the SDK to assume an IAM role using +a set of credentials from another source. Such as when paired with static +credentials, "profile_source", "credential_process", or "credential_source" +fields. If "role_arn" is provided, a source of credentials must also be +specified, such as "source_profile", "credential_source", or +"credential_process". + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = + role_session_name = session_name + + +The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you +must also set the Session Option.AssumeRoleTokenProvider. The Session will fail +to load if the AssumeRoleTokenProvider is not specified. 
+ + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + +To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider +documentation. + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. 
CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 000000000..60a6f9ce2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,277 @@ +package session + +import ( + "os" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" +) + +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. 
If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled *bool + CSMPort string + CSMHost string + CSMClientID string + + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool + enableEndpointDiscovery string + + // Specifies the WebIdentity token the SDK should use to assume a role + // with. + // + // AWS_WEB_IDENTITY_TOKEN_FILE=file_path + WebIdentityTokenFilePath string + + // Specifies the IAM role arn to use when assuming an role. + // + // AWS_ROLE_ARN=role_arn + RoleARN string + + // Specifies the IAM role session name to use when assuming a role. + // + // AWS_ROLE_SESSION_NAME=session_name + RoleSessionName string +} + +var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } + webIdentityTokenFilePathEnvKey = []string{ + "AWS_WEB_IDENTITY_TOKEN_FILE", + } + roleARNEnvKey = []string{ + "AWS_ROLE_ARN", + } + roleSessionNameEnvKey = []string{ + "AWS_ROLE_SESSION_NAME", + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. 
+// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + // Static environment credentials + var creds credentials.Value + setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&creds.SessionToken, credSessionEnvKey) + if creds.HasKeys() { + // Require logical grouping of credentials + creds.ProviderName = EnvProviderName + cfg.Creds = creds + } + + // Role Metadata + setFromEnvVal(&cfg.RoleARN, roleARNEnvKey) + setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey) + + // Web identity environment variables + setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey) + + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + // endpoint discovery is in reference to it being enabled. + setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 000000000..7b0a942e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,660 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/csm" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source 
profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ConfigProvider. +type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + + return s + } + + s := deprecatedNewSession(cfgs...) 
+ + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + err = fmt.Errorf("failed to enable CSM, %v", err) + s.Config.Logger.Log("ERROR:", err.Error()) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + } + + return s +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created, such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) + + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. 
+ // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. + Handlers request.Handlers +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). 
Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) != 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + return s +} + +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { + if logger != nil { + logger.Log("Enabling CSM") + } + + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) + if err != nil { + return err + } + r.InjectHandlers(handlers) + + return nil +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + + handlers := opts.Handlers + if handlers.IsEmpty() { + handlers = defaults.Handlers() + } + + // Get a merged version of the user provided config to determine if + // credentials were. 
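+	// (Editor's note: i.e., to determine whether the caller explicitly
+	// provided credentials in the passed-in aws.Config values.)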
+ userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + cfg.MergeIn(userCfg) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) + if err != nil { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return nil, err + } + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) + if err != nil { + return nil, err + } + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + // Nil transport implies `http.DefaultTransport` should be used. Since + // the SDK cannot modify, nor copy the `DefaultTransport` specifying + // the values the next closest behavior. 
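+		// (Editor's note: i.e., a fresh Transport with defaults close to
+		// http.DefaultTransport is obtained from getCABundleTransport so the
+		// custom RootCAs can be set without mutating http.DefaultTransport.)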
+ t = getCABundleTransport() + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + return nil +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // Backwards compatibility, the error will be eaten if user calls ClientConfig + // directly. All SDK services will use ClientconfigWithError. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { + s = s.Copy(cfgs...) 
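+	// Editor's note: the Session is copied here so that per-client config
+	// overrides apply only to this client's configuration and do not mutate
+	// the Session shared with other service clients.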
+ + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..d91ac93a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,491 @@ +package session + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential Process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. 
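+	// Editor's illustrative note (not part of the upstream SDK): for example,
+	// a profile using the credential_process key defined above could be
+	// written as (command path hypothetical):
+	//
+	//	[profile proc-example]
+	//	credential_process = /usr/local/bin/fetch-creds
+	//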
+ DefaultSharedConfigProfile = `default` +) + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id and + // aws_secret_access_key must be provided together in the same file to be + // considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of + // the other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + RoleARN string + RoleSessionName string + ExternalID string + MFASerial string + + SourceProfileName string + SourceProfile *sharedConfig + + // Region is the region the SDK should use for looking up AWS service + // endpoints and signing requests. + // + // region + Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string +} + +type sharedConfigFile struct { + Filename string + IniData ini.Sections +} + +// loadSharedConfig retrieves the configuration from the list of files using +// the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of +// A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. +func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + profiles := map[string]struct{}{} + if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil { + return sharedConfig{}, err + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason + continue + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: sections, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error { + // Trim files from the list that don't exist. + var skippedFiles int + var profileNotFoundErr error + for _, f := range files { + if err := cfg.setFromIniFile(profile, f, exOpts); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore profiles not defined in individual files. 
+ profileNotFoundErr = err + skippedFiles++ + continue + } + return err + } + } + if skippedFiles == len(files) { + // If all files were skipped because the profile is not found, return + // the original profile not found error. + return profileNotFoundErr + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + cfg.clearAssumeRoleOptions() + } else { + // First time a profile has been seen, It must either be a assume role + // or credentials. Assert if the credential type requires a role ARN, + // the ARN is also set. + if err := cfg.validateCredentialsRequireARN(profile); err != nil { + return err + } + } + profiles[profile] = struct{}{} + + if err := cfg.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(cfg.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + cfg.clearCredentialOptions() + + srcCfg := &sharedConfig{} + err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts) + if err != nil { + // SourceProfile that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: cfg.RoleARN, + SourceProfile: cfg.SourceProfileName, + } + } + + cfg.SourceProfile = srcCfg + } + + return nil +} + +// setFromFile loads the configuration from the file using the profile +// provided. A sharedConfig pointer type value is used so that multiple config +// file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For +// example if a config file only includes aws_access_key_id but no +// aws_secret_access_key the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error { + section, ok := file.IniData.GetSection(profile) + if !ok { + // Fallback to to alternate profile name: profile + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} + } + } + + if exOpts { + // Assume Role Parameters + updateString(&cfg.RoleARN, section, roleArnKey) + updateString(&cfg.ExternalID, section, externalIDKey) + updateString(&cfg.MFASerial, section, mfaSerialKey) + updateString(&cfg.RoleSessionName, section, roleSessionNameKey) + updateString(&cfg.SourceProfileName, section, sourceProfileKey) + updateString(&cfg.CredentialSource, section, credentialSourceKey) + + updateString(&cfg.Region, section, regionKey) + } + + updateString(&cfg.CredentialProcess, section, credentialProcessKey) + updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey) + + // Shared Credentials + creds := credentials.Value{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + if creds.HasKeys() { + cfg.Creds = creds + } + + // Endpoint discovery + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) + + return nil +} + +func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(cfg.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(cfg.CredentialSource) != 0: + credSource = credentialSourceKey + case len(cfg.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(cfg.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (cfg *sharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. + if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } + } + + return true +} + +// updateString will only update the dst with the value in the section key, key +// is present in the section. 
+func updateString(dst *string, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.String(key) +} + +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string + SourceProfile string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf( + "failed to load assume role for %s, source profile %s has no shared credentials", + e.RoleARN, e.SourceProfile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +// CredentialRequiresARNError provides the error for shared config credentials +// that are incorrectly configured in the shared config or credentials file. +type CredentialRequiresARNError struct { + // type of credentials that were configured. + Type string + + // Profile name the credentials were in. + Profile string +} + +// Code is the short id of the error. 
+func (e CredentialRequiresARNError) Code() string { + return "CredentialRequiresARNError" +} + +// Message is the description of the error +func (e CredentialRequiresARNError) Message() string { + return fmt.Sprintf( + "credential type %s requires role_arn, profile %s", + e.Type, e.Profile, + ) +} + +// OrigErr is the underlying error that caused the failure. +func (e CredentialRequiresARNError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e CredentialRequiresARNError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 000000000..244c86da0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 000000000..6aa2ed241 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. 
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 000000000..bd082e9d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..8104793aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,806 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. 
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. 
See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. 
If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, true, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.sanitizeHostForHeader() + ctx.assignAmzQueryValues() + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. 
+func SignSDKRequest(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. + v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + curTime := curTimeFn() + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTime +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + if err := ctx.buildBodyDigest(); err != nil { + return err + } + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" 
+ ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } + + return nil +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) 
+ continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() error { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { + hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } + hashBytes, err := makeSha256Reader(ctx.Body) + if err != nil { + return err + } + hash = hex.EncodeToString(hashBytes) + } + + if includeSHA256Header { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash + + return nil +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) { + hash := sha256.New() + start, err := reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + return nil, err + } + defer func() { + // ensure error is return if unable to seek back to start of payload. + _, err = reader.Seek(start, sdkio.SeekStart) + }() + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } + + return hash.Sum(nil), nil +} + +const doubleSpace = " " + +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + vals[i] = string(buf[:m]) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 000000000..455091540 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,207 @@ +package aws + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the +// SDK to accept an io.Reader that is not also an io.Seeker for unsigned +// streaming payload API operations. +// +// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API +// operation's input will prevent that operation being retried in the case of +// network errors, and cause operation requests to fail if the operation +// requires payload signing. +// +// Note: If using With S3 PutObject to stream an object upload The SDK's S3 +// Upload manager (s3manager.Uploader) provides support for streaming with the +// ability to retry network errors. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// IsReaderSeekable returns if the underlying reader type can be seeked. A +// io.Reader might not actually be seekable if it is the ReaderSeekerCloser +// type. 
+func IsReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReaderSeekerCloser: + return v.IsSeeker() + case *ReaderSeekerCloser: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be +// returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// HasLen returns the length of the underlying reader if the value implements +// the Len() int method. +func (r ReaderSeekerCloser) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +// GetLen returns the length of the bytes remaining in the underlying reader. +// Checks first for Len(), then io.Seeker to determine the size of the +// underlying reader. +// +// Will return -1 if the length cannot be determined. +func (r ReaderSeekerCloser) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +// SeekerLen attempts to get the number of bytes remaining at the seeker's +// current position. Returns the number of bytes remaining or error. +func SeekerLen(s io.Seeker) (int64, error) { + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := s.(type) { + case ReaderSeekerCloser: + return v.GetLen() + case *ReaderSeekerCloser: + return v.GetLen() + } + + return seekerLen(s) +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, sdkio.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, sdkio.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. 
By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 000000000..6192b2455 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 000000000..0210d2720 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. +// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 000000000..d1548ebd8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. 
+package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.25.3" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 000000000..e83a99886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. 
+var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 000000000..0895d53cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 000000000..0b76999ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 000000000..25ce0fe13 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 000000000..04345a54c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 000000000..91ba2a59d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 000000000..8d462f77e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 000000000..3b0ca7afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 000000000..582c024ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. + ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. 
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. +type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 000000000..e56dcee2f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,349 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. 
+var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. 
+ // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assiging a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) + + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) 
+ stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 000000000..24df543d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
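As an informal aid (inferred from the numberHelper logic that follows, not an exhaustive specification), the literal shapes this check is written to accept look like the following:

// Examples of literals the scanner treats as numbers (illustrative, not exhaustive):
//
//	42   -7   3.14         plain integers and decimal floats
//	1e-4   2E10            scientific notation
//	0b1010   0o17   0x1A   binary, octal, and hex (leading 0 plus a base marker)
//
// Anything that fails these checks falls back to the boolean or plain string
// handling in newLitToken further below.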
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 000000000..e52ac399f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 000000000..a45c0bc56 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 000000000..8a84c7cbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 000000000..457287019 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 000000000..7f01cf7c7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 000000000..f82095ba2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 000000000..6bb696447 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + + s.Continue() + return false + } + s.prevTok = tok + + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true + s.prevTok = emptyToken +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 000000000..18f3fe893 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. +func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. 
+// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 000000000..305999d29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical 
character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 000000000..94841c324 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error { + t := v.Sections.container[v.scope] + if t.values == nil { + t.values = values{} + } + + switch expr.Kind { + case ASTKindExprStatement: + opExpr := expr.GetRoot() + switch opExpr.Kind { + case ASTKindEqualExpr: + children := opExpr.GetChildren() + if len(children) <= 1 { + return NewParseError("unexpected token type") + } + + rhs := children[1] + + if rhs.Root.Type() != TokenLit { + return NewParseError("unexpected token type") + } + + key := EqualExprKey(opExpr) + v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw()) + if err != nil { + return err + } + + t.values[key] = v + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + default: + return NewParseError(fmt.Sprintf("unsupported expression %v", expr)) + } + + v.Sections.container[v.scope] = t + return nil +} + +// VisitStatement visits statements... +func (v *DefaultVisitor) VisitStatement(stmt AST) error { + switch stmt.Kind { + case ASTKindCompletedSectionStatement: + child := stmt.GetRoot() + if child.Kind != ASTKindSectionStatement { + return NewParseError(fmt.Sprintf("unsupported child statement: %T", child)) + } + + name := string(child.Root.Raw()) + v.Sections.container[name] = Section{} + v.scope = name + default: + return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind)) + } + + return nil +} + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represent +// a sectioned entry in a configuration file. +type Section struct { + Name string + values values +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will returned what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) bool { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) int64 { + return t.values[k].IntValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) float64 { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go new file mode 100644 index 000000000..99915f7f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go @@ -0,0 +1,25 @@ +package ini + +// Walk will traverse the AST using the v, the Visitor. 
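The skipper described earlier interacts with the visitor in an observable way: keys inside a skipped, indented block never reach the Sections container. A minimal sketch of that behaviour, under the same caveat that this is an internal package and the input is hypothetical, mirroring the skipper's own example:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini" // internal package, as above
)

func main() {
	// "nested =" followed by an indented block should be skipped; "bar = baz" should be kept.
	src := []byte("[foo]\nnested =\n    a = b\n    c = d\nbar = baz\n")

	sections, err := ini.ParseBytes(src)
	if err != nil {
		panic(err)
	}

	sec, _ := sections.GetSection("foo")
	fmt.Println(sections.List())   // [foo]
	fmt.Println(sec.Has("a"))      // false: the indented block under "nested =" was skipped
	fmt.Println(sec.String("bar")) // baz
}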
+func Walk(tree []AST, v Visitor) error { + for _, node := range tree { + switch node.Kind { + case ASTKindExpr, + ASTKindExprStatement: + + if err := v.VisitExpr(node); err != nil { + return err + } + case ASTKindStatement, + ASTKindCompletedSectionStatement, + ASTKindNestedSectionStatement, + ASTKindCompletedNestedSectionStatement: + + if err := v.VisitStatement(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go new file mode 100644 index 000000000..7ffb4ae06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go @@ -0,0 +1,24 @@ +package ini + +import ( + "unicode" +) + +// isWhitespace will return whether or not the character is +// a whitespace character. +// +// Whitespace is defined as a space or tab. +func isWhitespace(c rune) bool { + return unicode.IsSpace(c) && c != '\n' && c != '\r' +} + +func newWSToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if !isWhitespace(b[i]) { + break + } + } + + return newToken(TokenWS, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go new file mode 100644 index 000000000..0b9b0dfce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go @@ -0,0 +1,57 @@ +package s3err + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequestFailure provides additional S3 specific metadata for the request +// failure. +type RequestFailure struct { + awserr.RequestFailure + + hostID string +} + +// NewRequestFailure returns a request failure error decordated with S3 +// specific metadata. +func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure { + return &RequestFailure{RequestFailure: err, hostID: hostID} +} + +func (r RequestFailure) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", + r.StatusCode(), r.RequestID(), r.hostID) + return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} +func (r RequestFailure) String() string { + return r.Error() +} + +// HostID returns the HostID request response value. +func (r RequestFailure) HostID() string { + return r.hostID +} + +// RequestFailureWrapperHandler returns a handler to rap an +// awserr.RequestFailure with the S3 request ID 2 from the response. 
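The handler below is meant to be pushed onto an S3 client's handler list elsewhere in the SDK; the net effect is that an awserr.RequestFailure gets decorated with the X-Amz-Id-2 host id. A hand-rolled sketch of what that wrapping produces, constructing the awserr values directly instead of going through a real HTTP response (status code, request id, and host id below are made up):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/internal/s3err" // internal package: only usable from within the SDK module itself
)

func main() {
	base := awserr.New("NoSuchKey", "The specified key does not exist.", nil)
	reqFail := awserr.NewRequestFailure(base, 404, "REQ123") // hypothetical status code and request id

	// Decorate the request failure with the host id, as the handler does for real responses.
	err := s3err.NewRequestFailure(reqFail, "host-id-from-x-amz-id-2")

	fmt.Println(err)          // message plus status code, request id, and host id
	fmt.Println(err.HostID()) // host-id-from-x-amz-id-2
}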
+func RequestFailureWrapperHandler() request.NamedHandler { + return request.NamedHandler{ + Name: "awssdk.s3.errorHandler", + Fn: func(req *request.Request) { + reqErr, ok := req.Error.(awserr.RequestFailure) + if !ok || reqErr == nil { + return + } + + hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2") + if req.Error == nil { + return + } + + req.Error = NewRequestFailure(reqErr, hostID) + }, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go new file mode 100644 index 000000000..6c443988b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go new file mode 100644 index 000000000..5aa9137e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go @@ -0,0 +1,10 @@ +// +build !go1.7 + +package sdkio + +// Copy of Go 1.7 io package's Seeker constants. +const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 000000000..e5f005613 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 000000000..44898eed0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 000000000..810ec7f08 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. +const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1).
+ // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 000000000..0c9802d87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 000000000..f4651da2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,11 @@ +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 000000000..b1d93a33d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,24 @@ +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 000000000..38ea61afe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 000000000..7da8a49ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 000000000..ebcbc2b40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. 
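Two small conveniences from the files above, shown together in a brief sketch (internal packages, so only usable from within the SDK module; the resolved paths depend on the environment):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/sdkuri"
	"github.com/aws/aws-sdk-go/internal/shareddefaults"
)

func main() {
	// PathJoin keeps the trailing "/" that path.Join alone would drop.
	fmt.Println(sdkuri.PathJoin("latest", "meta-data", "iam/")) // latest/meta-data/iam/

	// Shared config locations resolve against HOME (or USERPROFILE on Windows).
	fmt.Println(shareddefaults.SharedCredentialsFilename()) // e.g. /home/user/.aws/credentials
	fmt.Println(shareddefaults.SharedConfigFilename())      // e.g. /home/user/.aws/config
}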
+func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 000000000..ecc7bf82f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := 
base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 000000000..4b972b2d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that that the decoder should use to log the +// message decode to. 
+func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 000000000..150a60981 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. 
+func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 000000000..5481ef307 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go new file mode 100644 index 000000000..97937c8e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go @@ -0,0 +1,196 @@ +package eventstreamapi + +import ( + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventStream headers with specific meaning to async API functionality. +const ( + MessageTypeHeader = `:message-type` // Identifies type of message. 
+ EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) + +// EventReader provides reading from the EventStream of an reader. +type EventReader struct { + reader io.ReadCloser + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns a EventReader built from the reader and unmarshaler +// provided. Use ReadStream method to start reading from the EventStream. +func NewEventReader( + reader io.ReadCloser, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + reader: reader, + decoder: eventstream.NewDecoder(reader), + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// UseLogger instructs the EventReader to use the logger and log level +// specified. +func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { + if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { + r.decoder.UseLogger(logger) + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload will is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read. 
+ r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + err = r.unmarshalEventException(msg) + return nil, err + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + } +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// Close closes the EventReader's EventStream reader. +func (r *EventReader) Close() error { + return r.reader.Close() +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
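// Illustrative sketch, not part of the vendored SDK source. It shows the shape
// of the unmarshalerForEventType callback that ReadEvent above dispatches
// through; the "Records" event name and the recordsEvent type are hypothetical.
type recordsEvent struct{ Payload []byte }

func (e *recordsEvent) UnmarshalEvent(p protocol.PayloadUnmarshaler, msg eventstream.Message) error {
	// ReadEvent reuses the message's payload buffer, so take a copy here.
	e.Payload = append([]byte{}, msg.Payload...)
	return nil
}

func exampleUnmarshalerForEventType(eventType string) (Unmarshaler, error) {
	switch eventType {
	case "Records": // hypothetical :event-type value
		return &recordsEvent{}, nil
	default:
		return nil, fmt.Errorf("unknown event type name, %s", eventType)
	}
}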
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 000000000..5ea5a988b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,24 @@ +package eventstreamapi + +import "fmt" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 000000000..3b44dde2f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,166 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
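// Illustrative sketch, not part of the vendored SDK source. It exercises the
// Headers helpers above (Del is defined just below); StringValue is the string
// header value type from header_value.go in this package.
func exampleHeaders() {
	var hs Headers
	hs.Set(":event-type", StringValue("Records")) // appends a new header
	hs.Set(":event-type", StringValue("Stats"))   // replaces it in place
	fmt.Println(hs.Get(":event-type"))            // Stats
	hs.Del(":event-type")
	fmt.Println(hs.Get(":event-type") == nil) // true
}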
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 000000000..e3fc0766a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,501 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
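// Illustrative sketch, not part of the vendored SDK source. It hand-writes one
// string header in the wire layout decodeHeaderName/decodeHeaderValue above
// read back: a 1-byte name length, the name bytes, a 1-byte value type, and
// (for strings) a big-endian 2-byte value length followed by the value bytes.
// stringValueType is declared just below in header_value.go.
func encodeStringHeader(w io.Writer, name, value string) error {
	if err := binary.Write(w, binary.BigEndian, uint8(len(name))); err != nil {
		return err
	}
	if _, err := w.Write([]byte(name)); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint8(stringValueType)); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, uint16(len(value))); err != nil {
		return err
	}
	_, err := w.Write([]byte(value))
	return err
}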
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
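// Illustrative sketch, not part of the vendored SDK source. It mirrors the
// epoch-millisecond conversion that TimestampValue above uses on the wire.
func exampleTimestampRoundTrip() {
	t := time.Date(2020, 1, 2, 3, 4, 5, 600000000, time.UTC)

	// Encode side: milliseconds since the Unix epoch, as an int64.
	msec := t.UnixNano() / int64(time.Millisecond)

	// Decode side: split back into seconds and leftover milliseconds.
	back := time.Unix(msec/1e3, (msec%1e3)*int64(time.Millisecond)).UTC()

	fmt.Println(back.Equal(t)) // true: millisecond precision survives
}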
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 000000000..2dc012a66 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,103 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := encodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 000000000..d7d42db0a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
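// Illustrative sketch, not part of the vendored SDK source. It shows the kind
// of endpoint hosts the validators below accept and reject; the operation name
// is hypothetical. Assumes a fmt import alongside this file's existing ones.
func exampleHostValidation() {
	fmt.Println(ValidHostLabel("my-service")) // true
	fmt.Println(ValidHostLabel("bad_label"))  // false: '_' is not allowed
	fmt.Println(ValidHostLabel(""))           // false: labels must be 1-63 bytes

	// A trailing dot (fully qualified name) is tolerated.
	err := ValidateEndpointHost("ExampleOp", "service.region.amazonaws.com.")
	fmt.Println(err == nil) // true
}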
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 000000000..915b0fcaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 000000000..53831dff9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. 
This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 000000000..864fb6704 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. 
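// Illustrative sketch, not part of the vendored SDK source. It shows the kind
// of tagged shape BuildJSON below serializes; the field names and tags are
// hypothetical stand-ins for generated API shapes.
type exampleShape struct {
	Name  *string   `locationName:"name"`
	Count *int64    `locationName:"count"`
	Tags  []*string `locationName:"tags"`
}

func exampleBuildJSON() {
	name, count := "demo", int64(2)
	in := &exampleShape{Name: &name, Count: &count, Tags: []*string{&name}}

	b, _ := BuildJSON(in)
	fmt.Println(string(b)) // {"name":"demo","count":2,"tags":["demo"]}
}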
+func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + 
buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + case aws.JSONValue: + str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape) + if err != nil { + return fmt.Errorf("unable to encode JSONValue, %v", err) + } + buf.WriteString(str) + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +var hex = "0123456789abcdef" + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for i := 0; i < len(s); i++ { + if s[i] == '"' { + buf.WriteString(`\"`) + } else if s[i] == '\\' { + buf.WriteString(`\\`) + } else if s[i] == '\b' { + buf.WriteString(`\b`) + } else if s[i] == '\f' { + buf.WriteString(`\f`) + } else if s[i] == '\r' { + buf.WriteString(`\r`) + } else if s[i] == '\t' { + buf.WriteString(`\t`) + } else if s[i] == '\n' { + buf.WriteString(`\n`) + } else if s[i] < 32 { + buf.WriteString("\\u00") + buf.WriteByte(hex[s[i]>>4]) + buf.WriteByte(hex[s[i]&0xF]) + } else { + buf.WriteByte(s[i]) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 000000000..ea0da79a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,250 @@ +package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in +// type. The value to unmarshal the json document into must be a pointer to the +// type. 
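// Illustrative sketch, not part of the vendored SDK source. It round-trips a
// hypothetical shape through BuildJSON above and UnmarshalJSON below, showing
// the control-character escaping writeString applies.
type exampleMessage struct {
	Body *string `locationName:"body"`
}

func exampleJSONRoundTrip() error {
	body := "tab\there"
	b, err := BuildJSON(&exampleMessage{Body: &body})
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // {"body":"tab\there"}

	var out exampleMessage
	if err := UnmarshalJSON(&out, bytes.NewReader(b)); err != nil {
		return err
	}
	fmt.Println(*out.Body == body) // true
	return nil
}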
+func UnmarshalJSONError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := json.NewDecoder(body).Decode(v) + if err != nil { + msg := "failed decoding error message" + if err == io.EOF { + msg = "error message missing" + err = nil + } + return awserr.NewUnmarshalError(err, msg, errBuf.Bytes()) + } + + return nil +} + +// UnmarshalJSON reads a stream and unmarshals the results in object v. +func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if 
value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 000000000..776d11018 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. +func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) { + b, err := json.Marshal(v) + if err != nil { + return "", err + } + + switch escape { + case NoEscape: + return string(b), nil + case Base64Escape: + return base64.StdEncoding.EncodeToString(b), nil + case QuotedEscape: + return strconv.Quote(string(b)), nil + } + + panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape)) +} + +// DecodeJSONValue will attempt to decode the string input as a JSONValue. +// Optionally decoding base64 the value first before JSON unmarshaling. +// +// Will panic if the escape mode is unknown. 
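// Illustrative sketch, not part of the vendored SDK source. It round-trips an
// aws.JSONValue through the Base64Escape mode handled by EncodeJSONValue above
// and DecodeJSONValue below.
func exampleJSONValueRoundTrip() error {
	v := aws.JSONValue{"debug": true, "retries": 3}

	s, err := EncodeJSONValue(v, Base64Escape)
	if err != nil {
		return err
	}
	// s is the base64 form of {"debug":true,"retries":3}

	got, err := DecodeJSONValue(s, Base64Escape)
	if err != nil {
		return err
	}
	fmt.Println(got["debug"], got["retries"]) // true 3
	return nil
}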
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) { + var b []byte + var err error + + switch escape { + case NoEscape: + b = []byte(v) + case Base64Escape: + b, err = base64.StdEncoding.DecodeString(v) + case QuotedEscape: + var u string + u, err = strconv.Unquote(v) + b = []byte(u) + default: + panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape)) + } + + if err != nil { + return nil, err + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go new file mode 100644 index 000000000..e21614a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -0,0 +1,81 @@ +package protocol + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// PayloadUnmarshaler provides the interface for unmarshaling a payload's +// reader into a SDK shape. +type PayloadUnmarshaler interface { + UnmarshalPayload(io.Reader, interface{}) error +} + +// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a +// HandlerList. This provides the support for unmarshaling a payload reader to +// a shape without needing a SDK request first. +type HandlerPayloadUnmarshal struct { + Unmarshalers request.HandlerList +} + +// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using +// the Unmarshalers HandlerList provided. Returns an error if unable +// unmarshaling fails. +func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error { + req := &request.Request{ + HTTPRequest: &http.Request{}, + HTTPResponse: &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: ioutil.NopCloser(r), + }, + Data: v, + } + + h.Unmarshalers.Run(req) + + return req.Error +} + +// PayloadMarshaler provides the interface for marshaling a SDK shape into and +// io.Writer. +type PayloadMarshaler interface { + MarshalPayload(io.Writer, interface{}) error +} + +// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList. +// This provides support for marshaling a SDK shape into an io.Writer without +// needing a SDK request first. +type HandlerPayloadMarshal struct { + Marshalers request.HandlerList +} + +// MarshalPayload marshals the SDK shape into the io.Writer using the +// Marshalers HandlerList provided. Returns an error if unable if marshal +// fails. +func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 000000000..0cb99eb57 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. 
+package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 000000000..75866d012 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,246 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
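// Illustrative sketch, not part of the vendored SDK source. It shows the
// flattened key names Parse below produces for a small, hypothetical shape
// using the standard (non-EC2) Query conventions.
type exampleQueueInput struct {
	QueueName *string   `locationName:"QueueName"`
	Tags      []*string `locationName:"Tags"`
}

func exampleParse() error {
	name, tag := "jobs", "team-a"
	body := url.Values{}
	in := &exampleQueueInput{QueueName: &name, Tags: []*string{&tag}}

	if err := Parse(body, in, false); err != nil {
		return err
	}

	fmt.Println(body.Encode()) // QueueName=jobs&Tags.member.1=team-a
	return nil
}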
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 000000000..f69c1efc9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. 
+func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 000000000..831b0110c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,69 @@ +package query + +import ( + "encoding/xml" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +type xmlErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlResponseError struct { + xmlErrorResponse +} + +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID + } + + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 000000000..1301b149d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,310 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
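// Illustrative sketch, not part of the vendored SDK source. It shows the
// location tags the rest package's Build handler (below) understands on a
// hypothetical input shape, and the request it is expected to produce,
// assuming request.New initializes the URL from the endpoint plus the
// operation's HTTP path. Assumes an extra import of
// github.com/aws/aws-sdk-go/aws/client/metadata.
type exampleRestInput struct {
	Bucket *string `location:"uri" locationName:"Bucket"`
	Marker *string `location:"querystring" locationName:"marker"`
	Token  *string `location:"header" locationName:"x-amz-token"`
}

func exampleRestBuild() {
	bucket, marker := "media", "page-2"
	in := &exampleRestInput{Bucket: &bucket, Marker: &marker}

	req := request.New(aws.Config{}, metadata.ClientInfo{Endpoint: "https://example.com"},
		request.Handlers{}, nil,
		&request.Operation{Name: "ExampleOp", HTTPMethod: "GET", HTTPPath: "/{Bucket}"},
		in, nil)

	Build(req)
	fmt.Println(req.HTTPRequest.URL.String())
	// expected: https://example.com/media?marker=page-2
}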
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. 
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 000000000..4366de2e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
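// Example sketch (not part of the vendored file) of EscapePath above; the object
// key is an assumed value. Unreserved bytes pass through untouched, everything
// else is percent-encoded, and encodeSep decides whether "/" survives as a path
// separator: buildURI uses encodeSep=false for greedy "{Key+}" URI members and
// encodeSep=true for plain "{Bucket}"-style members.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	key := "photos/2014 summer/a+b.jpg"

	fmt.Println(rest.EscapePath(key, false)) // photos/2014%20summer/a%2Bb.jpg
	fmt.Println(rest.EscapePath(key, true))  // photos%2F2014%20summer%2Fa%2Bb.jpg
}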
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 000000000..74e361e07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,237 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown 
payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 000000000..07a6187ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ 
-0,0 +1,79 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. +package restxml + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, + r.RequestID, + ) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 000000000..05d4ff519 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,84 @@ +package protocol + +import ( + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +// Output time is intended to not contain decimals +const ( + // RFC 7231#section-7.1.1.1 timetamp format. 
e.g Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is know to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 000000000..da1a68111 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 000000000..cf981fe95 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,306 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. 
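// Example sketch (not part of the vendored file) of the three timestamp format
// names handled by FormatTime and ParseTime above. The import path is the
// SDK-internal protocol package as vendored here; the instant matches the example
// value in the RFC822 comment.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, time.April, 29, 18, 30, 38, 0, time.UTC)

	fmt.Println(protocol.FormatTime(protocol.RFC822TimeFormatName, t))  // Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t)) // 2014-04-29T18:30:38Z
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))    // 1398796238

	// ParseTime reverses the conversion; unixTimestamp input may carry a
	// fractional part, which is kept to roughly millisecond precision.
	parsed, err := protocol.ParseTime(protocol.UnixTimeFormatName, "1398796238.123")
	fmt.Println(parsed.UTC(), err) // time near 2014-04-29 18:30:38.123 UTC, <nil>
}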
+func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. Will build value for their specific type +// struct, list, map, scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided reflect will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also. 
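// Example sketch (not part of the vendored file) of BuildXML above, using a
// hand-written shape. The struct, its tags, and the field values are assumptions
// for the example; real input shapes are generated by the SDK. Element order
// inside <Person> is not guaranteed because StructToXML walks a map when its
// sorted flag is false.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type Person struct {
	_    struct{} `locationName:"Person" type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
	Age  *int64   `locationName:"Age" type:"integer"`
}

func main() {
	var buf bytes.Buffer
	enc := xml.NewEncoder(&buf)

	err := xmlutil.BuildXML(&Person{Name: aws.String("Ada"), Age: aws.Int64(36)}, enc)
	fmt.Println(buf.String(), err) // e.g. <Person><Name>Ada</Name><Age>36</Age></Person> <nil>
}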
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 000000000..c1a511851 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := 
strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 000000000..7108d3800 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,291 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
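// Example sketch (not part of the vendored file) of UnmarshalXML above, decoding a
// small document into a hand-written shape; the struct, tags, and body are
// assumptions for the example. The empty wrapper argument means no wrapper element
// (such as the query protocol's "<OperationName>Result") is unwrapped first.
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

type Person struct {
	_    struct{} `locationName:"Person" type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
	Age  *int64   `locationName:"Age" type:"integer"`
}

func main() {
	body := `<Person><Name>Ada</Name><Age>36</Age></Person>`

	var out Person
	err := xmlutil.UnmarshalXML(&out, xml.NewDecoder(strings.NewReader(body)), "")
	fmt.Println(*out.Name, *out.Age, err) // Ada 36 <nil>
}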
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. +func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from a XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScaller deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type. 
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 000000000..42f71648e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,159 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
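// Example sketch (not part of the vendored file) of the XMLNode helpers: build a
// small node tree by hand with NewXMLElement/AddChild and render it with
// StructToXML (defined a little further below). Passing sorted=true sorts child
// elements and attributes so the output is deterministic and easy to compare.
// Names and values here are assumptions for the example.
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	root := xmlutil.NewXMLElement(xml.Name{Local: "Person"})

	name := xmlutil.NewXMLElement(xml.Name{Local: "Name"})
	name.Text = "Ada"
	age := xmlutil.NewXMLElement(xml.Name{Local: "Age"})
	age.Text = "36"

	root.AddChild(name)
	root.AddChild(age)

	var buf bytes.Buffer
	err := xmlutil.StructToXML(xml.NewEncoder(&buf), root, true)
	fmt.Println(buf.String(), err) // <Person><Age>36</Age><Name>Ada</Name></Person> <nil>
}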
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort Attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 000000000..b4a4e8c4a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,24952 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
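// Example sketch (not part of the vendored file) of the *WithContext pattern
// documented above: the request is cancelled if the context expires before Send
// completes. Region, bucket, key, and upload ID are assumed example values.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example/object"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		// Service errors implement awserr.Error; compare error codes such as
		// s3.ErrCodeNoSuchUpload to branch on specific failures.
		log.Fatal(err)
	}
}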
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Please select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. 
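+//
+// Illustrative sketch (not part of the generated documentation): calling the
+// WithContext variant with a timeout so the request is cancelled if it runs
+// too long. "svc" is assumed to be an *s3.S3 client, the bucket name is a
+// placeholder, and context/time come from the standard library.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err != nil {
+//        fmt.Println("delete bucket failed:", err)
+//    }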
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. 
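+//
+// Illustrative sketch (not part of the generated documentation): inspecting a
+// returned error with the runtime type assertion the operation documentation
+// suggests. "svc" is assumed to be an *s3.S3 client, the bucket name is a
+// placeholder, and awserr is github.com/aws/aws-sdk-go/aws/awserr.
+//
+//    _, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }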
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the CORS configuration information set for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. 
+// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. 
+// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketPolicyRequest method. 
+// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. For information about +// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketTaggingRequest method. 
+// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. 
+// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. 
+// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+// req, resp := client.DeleteObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { + op := &request.Operation{ + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &DeleteObjectTaggingInput{} + } + + output = &DeleteObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// Removes the tag-set from an existing object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectsRequest method. 
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. 
+// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the accelerate configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. 
+// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// Gets the access control policy for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. 
+// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the CORS configuration for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. 
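+// The params used below might be built as follows (hypothetical bucket name):
+//
+// params := &s3.GetBucketEncryptionInput{Bucket: aws.String("example-bucket")}
+//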
+// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
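+// One way params could look for this operation; both values are placeholders:
+//
+// params := &s3.GetBucketInventoryConfigurationInput{
+// Bucket: aws.String("example-bucket"),
+// Id: aws.String("example-inventory-id"),
+// }
+//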
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// No longer used, see the GetBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the lifecycle configuration information set on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. 
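+// A sketch of params for this call (placeholder bucket name); note that an
+// empty LocationConstraint in the response denotes the us-east-1 region:
+//
+// params := &s3.GetBucketLocationInput{Bucket: aws.String("example-bucket")}
+//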
+// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. 
+// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
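+// params for this call might look like the following; the bucket name and
+// metrics configuration ID are placeholders:
+//
+// params := &s3.GetBucketMetricsConfigurationInput{
+// Bucket: aws.String("example-bucket"),
+// Id: aws.String("example-metrics-id"),
+// }
+//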
+// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationRequest method. 
+// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see the GetBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
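+//
+// For instance, a custom header could be attached to the underlying HTTP
+// request before Send is called; the header name and value here are
+// illustrative only:
+//
+// req, _ := client.GetBucketNotificationConfigurationRequest(params)
+// req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc-123")
+// err := req.Send()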
+// +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyRequest method. 
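+// params for this call could be built as below (placeholder bucket name); on
+// success, resp.Policy carries the bucket policy as a JSON string:
+//
+// params := &s3.GetBucketPolicyInput{Bucket: aws.String("example-bucket")}
+//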
+// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. 
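+// A possible params value (placeholder bucket name); the returned
+// PolicyStatus.IsPublic field reports whether the bucket is considered public:
+//
+// params := &s3.GetBucketPolicyStatusInput{Bucket: aws.String("example-bucket")}
+//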
+// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketReplicationRequest method. 
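+// params might be constructed as follows (illustrative bucket name):
+//
+// params := &s3.GetBucketReplicationInput{Bucket: aws.String("example-bucket")}
+//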
+// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + output = &GetBucketReplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete +// can return a wrong result. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. 
+// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. 
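+// A sketch of params (placeholder bucket name); after a successful Send,
+// resp.TagSet holds the bucket's tags:
+//
+// params := &s3.GetBucketTaggingInput{Bucket: aws.String("example-bucket")}
+//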
+// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. 
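+// params could look like the following (placeholder bucket name); resp.Status
+// may be empty if versioning has never been configured on the bucket:
+//
+// params := &s3.GetBucketVersioningInput{Bucket: aws.String("example-bucket")}
+//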
+// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketWebsiteRequest method. 
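+// One possible params value for this call (illustrative bucket name):
+//
+// params := &s3.GetBucketWebsiteInput{Bucket: aws.String("example-bucket")}
+//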
+// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// Returns the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. 
+// +// Retrieves objects from Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. 
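+//
+// A minimal sketch of checking for that error code with an awserr runtime
+// type assertion. The svc value is assumed to be an already-configured *s3.S3
+// client, and the bucket/key names are placeholders:
+//
+//    out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
+//        Bucket: aws.String("example-bucket"), // placeholder
+//        Key:    aws.String("example-key"),    // placeholder
+//    })
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+//            fmt.Println("object not found:", aerr.Message())
+//        }
+//    } else {
+//        for _, g := range out.Grants {
+//            fmt.Println(aws.StringValue(g.Permission))
+//        }
+//    }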
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Gets an object's current Legal Hold status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Gets the object lock configuration for a bucket. The rule specified in the +// object lock configuration will be applied by default to every new object +// placed in the specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// Retrieves an object's retention settings. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
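+//
+// As an illustrative sketch only, the two-step form can be used to adjust the
+// underlying HTTP request before sending it, for example to add a header.
+// The svc client, bucket/key names, and header value below are placeholders:
+//
+//    req, out := svc.GetObjectTorrentRequest(&s3.GetObjectTorrentInput{
+//        Bucket: aws.String("example-bucket"), // placeholder
+//        Key:    aws.String("example-key"),    // placeholder
+//    })
+//    req.HTTPRequest.Header.Set("X-Example-Header", "debug") // placeholder header
+//    if err := req.Send(); err == nil {
+//        // out is only populated after Send returns without error
+//        defer out.Body.Close()
+//    }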
+// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Return torrent files from a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. 
+// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. 
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. 
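+//
+// A minimal usage sketch with a deadline; svc is assumed to be a configured
+// *s3.S3 client, and the bucket name and timeout are placeholders:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := svc.ListMultipartUploadsPagesWithContext(ctx,
+//        &s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")},
+//        func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
+//            for _, u := range page.Uploads {
+//                fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
+//            }
+//            return true // keep paginating until the last page
+//        })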
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all of the versions of objects in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. 
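+//
+// For example (an illustrative sketch; svc, ctx, and the bucket and prefix
+// values are assumed placeholders):
+//
+//    out, err := svc.ListObjectVersionsWithContext(ctx, &s3.ListObjectVersionsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Prefix: aws.String("logs/"),
+//    })
+//    if err == nil {
+//        for _, v := range out.Versions {
+//            fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
+//        }
+//    }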
+// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. 
+// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. 
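+//
+// A short sketch that bounds the call with a timeout; svc and the bucket,
+// prefix, and limit values are illustrative placeholders:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{
+//        Bucket:  aws.String("example-bucket"),
+//        Prefix:  aws.String("reports/"),
+//        MaxKeys: aws.Int64(100),
+//    })
+//    if err == nil {
+//        for _, obj := range out.Contents {
+//            fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
+//        }
+//    }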
+// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + } + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPartsRequest method. 
+// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
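+//
+// A minimal sketch, assuming an existing client "svc", a caller-supplied
+// context "ctx", and hypothetical bucket, key, and upload ID values, that
+// gathers every part of a multipart upload across all pages:
+//
+//    var parts []*s3.Part
+//    err := svc.ListPartsPagesWithContext(ctx, &s3.ListPartsInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("my-key"),
+//        UploadId: aws.String(uploadID),
+//    }, func(page *s3.ListPartsOutput, lastPage bool) bool {
+//        parts = append(parts, page.Parts...)
+//        return true // keep requesting pages until IsTruncated is false
+//    })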
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on a bucket using access control lists (ACL). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the CORS configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
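+//
+// A minimal sketch of the one-shot PutBucketEncryption call defined below,
+// assuming an existing client "svc" and a hypothetical bucket "my-bucket",
+// that applies default SSE-S3 (AES256) encryption:
+//
+//    _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//        Bucket: aws.String("my-bucket"),
+//        ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//            Rules: []*s3.ServerSideEncryptionRule{{
+//                ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                    SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
+//                },
+//            }},
+//        },
+//    })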
+// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
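+//
+// For the common one-shot case, a minimal sketch assuming an existing client
+// "svc", a hypothetical source bucket "my-bucket", and a destination bucket
+// ARN in "destinationBucketARN":
+//
+//    _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("daily-inventory"),
+//        InventoryConfiguration: &s3.InventoryConfiguration{
+//            Id:                     aws.String("daily-inventory"),
+//            IsEnabled:              aws.Bool(true),
+//            IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//            Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
+//            Destination: &s3.InventoryDestination{
+//                S3BucketDestination: &s3.InventoryS3BucketDestination{
+//                    Bucket: aws.String(destinationBucketARN),
+//                    Format: aws.String(s3.InventoryFormatCsv),
+//                },
+//            },
+//        },
+//    })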
+// +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Adds an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method. 
+// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// No longer used, see the PutBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. 
+// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. 
+// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationRequest method. 
+// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// No longer used, see the PutBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
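+//
+// A minimal sketch of the one-shot call, assuming an existing client "svc",
+// a hypothetical bucket "my-bucket", and an SQS queue ARN in "queueARN",
+// that routes object-created events to the queue:
+//
+//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            QueueConfigurations: []*s3.QueueConfiguration{{
+//                QueueArn: aws.String(queueARN),
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//            }},
+//        },
+//    })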
+// +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
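+//
+// A minimal sketch of the one-shot call, assuming an existing client "svc",
+// a hypothetical bucket "my-bucket", and a policy document already serialized
+// to JSON in "policyJSON":
+//
+//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//        Policy: aws.String(policyJSON),
+//    })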
+// +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketReplicationRequest method. 
+// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. 
+// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
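+//
+// A minimal sketch of the one-shot call, assuming an existing client "svc"
+// and a hypothetical bucket "my-bucket"; note that the supplied tag set
+// replaces any tags already on the bucket:
+//
+//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{
+//                {Key: aws.String("environment"), Value: aws.String("test")},
+//            },
+//        },
+//    })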
+// +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketVersioningRequest method. 
+// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketWebsiteRequest method. 
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Set the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. 
+// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. 
+// +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// +// Applies a Legal Hold configuration to the specified object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. 
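+//
+// // For illustration, a minimal sketch of placing a legal hold on a single object.
+// // The bucket and key are placeholders; ObjectLockLegalHold is the LegalHold
+// // type defined elsewhere in this file.
+// _, err := client.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
+//     Bucket:    aws.String("my-bucket"),
+//     Key:       aws.String("my-object"),
+//     LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
+// })
+//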
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// Places an object lock configuration on the specified bucket. The rule specified +// in the object lock configuration will be applied by default to every new +// object placed in the specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. 
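+//
+// // For illustration, a minimal sketch of setting a default retention rule on a
+// // bucket. The bucket name, retention mode, and period are placeholders.
+// _, err := client.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//     Bucket: aws.String("my-bucket"),
+//     ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//         ObjectLockEnabled: aws.String("Enabled"),
+//         Rule: &s3.ObjectLockRule{
+//             DefaultRetention: &s3.DefaultRetention{
+//                 Mode: aws.String("GOVERNANCE"),
+//                 Days: aws.Int64(30),
+//             },
+//         },
+//     },
+// })
+//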
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// Places an Object Retention configuration on an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. 
+// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
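+//
+// // For illustration, a minimal sketch of bounding the call with a timeout.
+// // The bucket, key, and tag values are placeholders.
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// _, err := client.PutObjectTaggingWithContext(ctx, &s3.PutObjectTaggingInput{
+//     Bucket: aws.String("my-bucket"),
+//     Key:    aws.String("my-object"),
+//     Tagging: &s3.Tagging{
+//         TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//     },
+// })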
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. 
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. 
+ // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +// +// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) +// in the Amazon Simple Storage Service API Reference. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. 
+ // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. 
+ Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
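+//
+// A minimal, illustrative sketch of how these fluent setters chain together
+// (not part of the generated code; the bucket ARN, account ID, prefix, and
+// the "CSV" format value below are placeholders/assumed values):
+//
+//	dest := &AnalyticsExportDestination{}
+//	dest.SetS3BucketDestination((&AnalyticsS3BucketDestination{}).
+//		SetBucket("arn:aws:s3:::example-results-bucket").
+//		SetBucketAccountId("111122223333").
+//		SetFormat("CSV").
+//		SetPrefix("analytics/"))
+//	if err := dest.Validate(); err != nil {
+//		// a missing Bucket or Format surfaces here as request.ErrInvalidParams
+//	}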
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
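+//
+// This follows the same validation pattern used throughout this file: required
+// fields are checked first, then any nested structures are validated and their
+// errors reported as nested ErrInvalidParams. A minimal, illustrative sketch
+// (not part of the generated code):
+//
+//	status := &BucketLoggingStatus{}
+//	if err := status.Validate(); err != nil {
+//		// not reached for the zero value: LoggingEnabled is optional and
+//		// nothing else is required
+//	}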
+func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of allowed origins and methods. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. 
+ MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how a CSV-formatted input object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // The single character used to indicate a row should be ignored when present + // at the start of a row. + Comments *string `type:"string"` + + // The value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // The value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. 
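+//
+// A minimal, illustrative sketch of configuring a CSVInput with these setters
+// (not part of the generated code; the delimiter and comment characters are
+// placeholders, and "USE" is assumed to be a valid FileHeaderInfo enum value):
+//
+//	in := (&CSVInput{}).
+//		SetFileHeaderInfo("USE").
+//		SetFieldDelimiter(",").
+//		SetRecordDelimiter("\n").
+//		SetComments("#")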
+func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // The value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Th single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // The value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. 
+func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
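+//
+// A minimal, illustrative sketch of assembling this request shape (not part
+// of the generated code; the bucket, key, upload ID, ETag, and part number
+// are placeholder values):
+//
+//	in := (&CompleteMultipartUploadInput{}).
+//		SetBucket("example-bucket").
+//		SetKey("example/object.txt").
+//		SetUploadId("example-upload-id").
+//		SetMultipartUpload((&CompletedMultipartUpload{}).
+//			SetParts([]*CompletedPart{
+//				(&CompletedPart{}).SetETag("example-etag").SetPartNumber(1),
+//			}))
+//	if err := in.Validate(); err != nil {
+//		// Bucket, Key, and UploadId are required; missing fields are
+//		// reported together as a request.ErrInvalidParams
+//	}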
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. 
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +// Specifies a condition that must be met for a redirect to apply. +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. 
+ KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. +func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { + s.HttpErrorCodeReturnedEquals = &v + return s +} + +// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. +func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +type ContinuationEvent struct { + _ struct{} `locationName:"ContinuationEvent" type:"structure"` +} + +// String returns the string representation +func (s ContinuationEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinuationEvent) GoString() string { + return s.String() +} + +// The ContinuationEvent is and event in the SelectObjectContentEventStream group of events. +func (s *ContinuationEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ContinuationEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +type CopyObjectInput struct { + _ struct{} `locationName:"CopyObjectRequest" type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. 
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata provided in the request. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Specifies whether you want to apply a Legal Hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS Encryption Context to use for object encryption. The
+ // value of this header is a base64-encoded UTF-8 string holding JSON with the
+ // encryption context key-value pairs.
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object. This value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as
+ // URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. 
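+//
+// A minimal, illustrative sketch of a copy request built with these setters
+// (not part of the generated code; the bucket and key names are placeholders,
+// and the copy source must be the URL-encoded "source-bucket/source-key"):
+//
+//	in := (&CopyObjectInput{}).
+//		SetBucket("destination-bucket").
+//		SetKey("copied/object.txt").
+//		SetCopySource("source-bucket/original/object.txt")
+//	if err := in.Validate(); err != nil {
+//		// Bucket, Key, and CopySource are the required fields checked by Validate
+//	}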
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. 
+ LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want Amazon S3 object lock to be enabled for the new + // bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
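+//
+// A minimal, illustrative sketch of the request shape this method validates
+// (not part of the generated code; the bucket name and region value are
+// placeholders):
+//
+//	in := (&CreateBucketInput{}).
+//		SetBucket("example-bucket").
+//		SetCreateBucketConfiguration((&CreateBucketConfiguration{}).
+//			SetLocationConstraint("eu-west-1"))
+//	if err := in.Validate(); err != nil {
+//		// only Bucket is required; leaving the configuration nil creates the
+//		// bucket in us-east-1
+//	}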
+func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +type CreateMultipartUploadInput struct { + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether you want to apply a Legal Hold to the uploaded object.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Specifies the object lock mode that you want to apply to the uploaded object.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // Specifies the date and time when you want the object lock to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// The container element for specifying the default object lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default object lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +type Delete struct { + _ struct{} `type:"structure"` + + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID that identifies the analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. 
+func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // It can take a while to propagate the deletion of a replication configuration + // to all Amazon S3 systems. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp"`
+
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies whether Amazon S3 should replicate delete markers.
+type DeleteMarkerReplication struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the delete marker replication.
+ //
+ // In the current implementation, Amazon S3 doesn't replicate the delete markers.
+ // The status must be Disabled.
+ Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerReplication) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { + s.Status = &v + return s +} + +type DeleteObjectInput struct { + _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // to process this operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether you want to delete this object even if it has a Governance-type + // object lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. 
+func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket. +type Destination struct { + _ struct{} `type:"structure"` + + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. For more information, + // see Cross-Region Replication Additional Configuration: Change Replica Owner + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in + // the Amazon Simple Storage Service Developer Guide. + Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store replicas of the object identified by the rule. + // + // A replication configuration can replicate objects to only one destination + // bucket. If there are multiple rules in your replication configuration, all + // rules must specify the same destination bucket. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. 
+ EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The storage class to use when replicating objects, such as standard or reduced + // redundancy. By default, Amazon S3 uses the storage class of the source object + // to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Describes the server-side encryption that will be applied to the restore +// results. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. + KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Encryption) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Encryption"}
+	if s.EncryptionType == nil {
+		invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetEncryptionType sets the EncryptionType field's value.
+func (s *Encryption) SetEncryptionType(v string) *Encryption {
+	s.EncryptionType = &v
+	return s
+}
+
+// SetKMSContext sets the KMSContext field's value.
+func (s *Encryption) SetKMSContext(v string) *Encryption {
+	s.KMSContext = &v
+	return s
+}
+
+// SetKMSKeyId sets the KMSKeyId field's value.
+func (s *Encryption) SetKMSKeyId(v string) *Encryption {
+	s.KMSKeyId = &v
+	return s
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
+type EncryptionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket.
+	// Amazon S3 uses this key to encrypt replica objects.
+	ReplicaKmsKeyID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EncryptionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EncryptionConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+	s.ReplicaKmsKeyID = &v
+	return s
+}
+
+type EndEvent struct {
+	_ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s EndEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EndEvent) GoString() string {
+	return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+type Error struct {
+	_ struct{} `type:"structure"`
+
+	Code *string `type:"string"`
+
+	Key *string `min:"1" type:"string"`
+
+	Message *string `type:"string"`
+
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+	s.Code = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+	s.Key = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+	s.Message = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+	s.VersionId = &v
+	return s
+}
+
+type ErrorDocument struct {
+	_ struct{} `type:"structure"`
+
+	// The object key name to use when a 4XX class error occurs.
+ // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID that identifies the analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. 
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // Name of the bucket to get the notification configuration for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+	s.RequestCharged = &v
+	return s
+}
+
+type GetObjectInput struct {
+	_ struct{} `locationName:"GetObjectRequest" type:"structure"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Return the object only if its entity tag (ETag) is the same as the one specified,
+	// otherwise return a 412 (precondition failed).
+	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+	// Return the object only if it has been modified since the specified time,
+	// otherwise return a 304 (not modified).
+	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+	// Return the object only if its entity tag (ETag) is different from the one
+	// specified, otherwise return a 304 (not modified).
+	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+	// Return the object only if it has not been modified since the specified time,
+	// otherwise return a 412 (precondition failed).
+	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of the object being read. This is a positive integer between
+	// 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+	// Useful for downloading just a part of an object.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+	// Downloads the specified range bytes of an object. For more information about
+	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Sets the Cache-Control header of the response.
+	ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+	// Sets the Content-Disposition header of the response.
+	ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+	// Sets the Content-Encoding header of the response.
+	ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+	// Sets the Content-Language header of the response.
+	ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+	// Sets the Content-Type header of the response.
+	ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+	// Sets the Expires header of the response.
+	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket containing the object whose Legal Hold status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose Legal Hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose Legal Hold status you want to retrieve. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose object lock configuration you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
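// Illustrative caller-side sketch (not part of the generated SDK file): reading an object's
// Legal Hold status and a bucket's Object Lock configuration with the input types above.
// Assumes an existing *s3.S3 client plus the aws and fmt imports; names are placeholders.
func exampleObjectLockReads(svc *s3.S3) error {
	hold, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("contracts/2019.pdf"),
	})
	if err != nil {
		return err
	}
	if hold.LegalHold != nil {
		fmt.Println("legal hold:", aws.StringValue(hold.LegalHold.Status)) // "ON" or "OFF"
	}

	cfg, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		return err
	}
	if cfg.ObjectLockConfiguration != nil {
		fmt.Println("object lock:", aws.StringValue(cfg.ObjectLockConfiguration.ObjectLockEnabled))
	}
	return nil
}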
+func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's object lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. 
The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
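// Illustrative caller-side sketch (not part of the generated SDK file): typical handling of a
// GetObjectOutput. Body is a live stream and must be closed; header-backed fields are pointers
// and are read through the aws value helpers. Client, bucket, and key are placeholders.
func exampleReadObjectOutput(svc *s3.S3) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("data.csv"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close() // Body is an io.ReadCloser streaming the object data
	fmt.Printf("etag=%s length=%d modified=%s\n",
		aws.StringValue(out.ETag),
		aws.Int64Value(out.ContentLength),
		aws.TimeValue(out.LastModified))
	for k, v := range out.Metadata { // the x-amz-meta-* headers
		fmt.Printf("meta %s=%s\n", k, aws.StringValue(v))
	}
	_, err = io.Copy(ioutil.Discard, out.Body)
	return err
}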
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket containing the object whose retention settings you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object whose retention settings you want to retrieve. 
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. 
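// Illustrative caller-side sketch (not part of the generated SDK file): fetching the retention
// settings of a specific object version with the types above. Placeholders throughout.
func exampleGetRetention(svc *s3.S3) error {
	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("contracts/2019.pdf"),
		VersionId: aws.String("example-version-id"), // optional; omit to use the current version
	})
	if err != nil {
		return err
	}
	if r := out.Retention; r != nil {
		fmt.Printf("mode=%s retain-until=%s\n",
			aws.StringValue(r.Mode), aws.TimeValue(r.RetainUntilDate))
	}
	return nil
}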
+func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
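// Illustrative caller-side sketch (not part of the generated SDK file): listing the tags on an
// object with the GetObjectTagging types defined just above. Placeholders throughout.
func exampleListObjectTags(svc *s3.S3) error {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("data.csv"),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}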
+func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. 
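// Illustrative caller-side sketch (not part of the generated SDK file): GlacierJobParameters is
// normally supplied inside a RestoreRequest when restoring an archived object; Tier uses one of
// the generated Tier* enum constants. Bucket and key names are placeholders.
func exampleRestoreFromGlacier(svc *s3.S3) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-archive-bucket"),
		Key:    aws.String("backups/2018.tar"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2), // keep the temporary copy for two days
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	return err
}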
+func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
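// Illustrative caller-side sketch (not part of the generated SDK file): building a Grant/Grantee
// pair for an explicit (non-canned) ACL update via PutObjectAcl. The owner and canonical user ID
// are placeholders; Type and Permission use the generated enum constants.
func exampleGrantRead(svc *s3.S3, ownerID string) error {
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("data.csv"),
		AccessControlPolicy: &s3.AccessControlPolicy{
			Owner: &s3.Owner{ID: aws.String(ownerID)},
			Grants: []*s3.Grant{{
				Grantee: &s3.Grantee{
					Type: aws.String(s3.TypeCanonicalUser),
					ID:   aws.String("example-canonical-user-id"),
				},
				Permission: aws.String(s3.PermissionRead),
			}},
		},
	})
	return err
}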
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. 
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
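// Illustrative caller-side sketch (not part of the generated SDK file): the HeadBucket types
// defined a little earlier are the usual way to probe whether a bucket exists and is reachable;
// a missing bucket surfaces as a request failure with HTTP 404. Assumes the
// "github.com/aws/aws-sdk-go/aws/awserr" import; the bucket name is a placeholder.
func exampleBucketExists(svc *s3.S3, bucket string) (bool, error) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String(bucket)})
	if err == nil {
		return true, nil
	}
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 404 {
		return false, nil
	}
	return false, err
}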
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The Legal Hold status for the specified object. 
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock expires. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. 
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
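// Illustrative caller-side sketch (not part of the generated SDK file): a HEAD request built from
// HeadObjectInput returns only the HeadObjectOutput headers above (no body), which makes it a
// cheap way to check an object's size and freshness. Placeholders throughout.
func exampleStatObject(svc *s3.S3) error {
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("data.csv"),
	})
	if err != nil {
		return err
	}
	fmt.Printf("size=%d etag=%s modified=%s storage=%s\n",
		aws.Int64Value(out.ContentLength),
		aws.StringValue(out.ETag),
		aws.TimeValue(out.LastModified),
		aws.StringValue(out.StorageClass))
	return nil
}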
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. 
Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
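+//
+// As an illustrative sketch (not part of the generated code; the Id and the
+// destination bucket ARN below are placeholders), a configuration that should
+// pass this validation can be assembled with the generated Set* helpers on
+// these types:
+//
+//	cfg := (&s3.InventoryConfiguration{}).
+//		SetId("weekly-report").
+//		SetIsEnabled(true).
+//		SetIncludedObjectVersions("All").
+//		SetSchedule((&s3.InventorySchedule{}).SetFrequency("Weekly")).
+//		SetDestination((&s3.InventoryDestination{}).
+//			SetS3BucketDestination((&s3.InventoryS3BucketDestination{}).
+//				SetBucket("arn:aws:s3:::example-destination-bucket").
+//				SetFormat("CSV")))
+//	if err := cfg.Validate(); err != nil {
+//		// handle the invalid configuration
+//	}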
+func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. 
+ // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +type JSONInput struct { + _ struct{} `type:"structure"` + + // The type of JSON. Valid values: Document, Lines. 
+ Type *string `type:"string" enum:"JSONType"` +} + +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} + +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for AWS Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
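+//
+// As an illustrative sketch (the function ARN and key prefix are placeholders,
+// and aws.String comes from the imported aws package), a configuration that
+// should pass this validation might look like:
+//
+//	lambdaCfg := (&s3.LambdaFunctionConfiguration{}).
+//		SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:process-upload").
+//		SetEvents([]*string{aws.String("s3:ObjectCreated:*")}).
+//		SetFilter((&s3.NotificationConfigurationFilter{}).
+//			SetKey((&s3.KeyFilter{}).SetFilterRules([]*s3.FilterRule{
+//				(&s3.FilterRule{}).SetName("prefix").SetValue("uploads/"),
+//			})))
+//	if err := lambdaCfg.Validate(); err != nil {
+//		// handle the invalid configuration
+//	}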
+func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. 
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // No longer used; use Filter instead. + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. 
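+//
+// As an illustrative sketch (the rule ID and prefix are placeholders), it can
+// be chained with the other generated setters to build a rule that expires
+// objects under a prefix after 30 days:
+//
+//	rule := (&s3.LifecycleRule{}).
+//		SetID("expire-tmp-objects").
+//		SetStatus("Enabled").
+//		SetFilter((&s3.LifecycleRuleFilter{}).SetPrefix("tmp/")).
+//		SetExpiration((&s3.LifecycleExpiration{}).SetDays(30))
+//	if err := rule.Validate(); err != nil {
+//		// handle the invalid rule
+//	}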
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. 
+ Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. 
+ AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
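+//
+// As an illustrative sketch (svc is assumed to be an *s3.S3 client and the
+// bucket name is a placeholder), a valid input can be reused to walk a
+// truncated listing by feeding NextContinuationToken back into the request:
+//
+//	input := (&s3.ListBucketInventoryConfigurationsInput{}).SetBucket("example-bucket")
+//	for {
+//		out, err := svc.ListBucketInventoryConfigurations(input)
+//		if err != nil {
+//			break // or handle the error
+//		}
+//		// ... consume out.InventoryConfigurationList ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		input.ContinuationToken = out.NextContinuationToken
+//	}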
+func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. 
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. 
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. 
+ NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +type ListObjectVersionsInput struct { + _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. 
An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
+ EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. 
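+//
+// In responses, Versions is typically consumed one page at a time. As an
+// illustrative sketch (svc is assumed to be an *s3.S3 client; the bucket and
+// prefix are placeholders), the ListObjectVersionsPages paginator follows
+// NextKeyMarker and NextVersionIdMarker for you:
+//
+//	err := svc.ListObjectVersionsPages(
+//		(&s3.ListObjectVersionsInput{}).SetBucket("example-bucket").SetPrefix("logs/"),
+//		func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//			for _, v := range page.Versions {
+//				_ = v // placeholder: inspect *v.Key, *v.VersionId, *v.IsLatest here
+//			}
+//			return true // keep going until the final page
+//		})
+//	if err != nil {
+//		// handle the error
+//	}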
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
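+// Each Set* method returns the receiver, so request values can be chained
+// fluently instead of taking the address of every field. A small sketch
+// (bucket and prefix are illustrative; svc is an assumed *s3.S3 client):
+//
+//    input := new(s3.ListObjectsInput).
+//        SetBucket("example-bucket").
+//        SetPrefix("logs/").
+//        SetMaxKeys(100)
+//    out, err := svc.ListObjects(input)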
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in the subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: This element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include the NextMarker and it
+ // is truncated, you can use the value of the last Key in the response as the
+ // marker in the subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
+
+type ListObjectsV2Input struct {
+ _ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
+
+ // Name of the bucket to list.
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. 
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by delimiter
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates Amazon S3 that the list is being continued on
+ // this bucket with a token. ContinuationToken is obfuscated and is not a real
+ // key
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list requests to
+ // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+ // is obfuscated and is not a real key
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
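+// A usage sketch (client setup and bucket name assumed): the
+// NextContinuationToken of one page is passed as the ContinuationToken of the
+// next request until IsTruncated is false; ListObjectsV2Pages wraps that loop.
+//
+//    input := &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")}
+//    err := svc.ListObjectsV2Pages(input,
+//        func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//            fmt.Println("keys in this page:", aws.Int64Value(page.KeyCount))
+//            return !lastPage
+//        })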
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
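+// Validate can also be called directly before sending a request; the SDK runs
+// the same check while building the request. A sketch with illustrative values:
+//
+//    in := &s3.ListPartsInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("backup.tar"),
+//        UploadId: aws.String("example-upload-id"),
+//    }
+//    if err := in.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing any missing or too-short fields
+//        log.Fatal(err)
+//    }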
+func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. 
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an S3 location that will receive the results of the restore request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. 
+ StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. 
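+ // A minimal sketch of enabling access logging (bucket names are illustrative;
+ // the client is assumed):
+ //
+ //    _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+ //        Bucket: aws.String("source-bucket"),
+ //        BucketLoggingStatus: &s3.BucketLoggingStatus{
+ //            LoggingEnabled: &s3.LoggingEnabled{
+ //                TargetBucket: aws.String("log-bucket"),
+ //                TargetPrefix: aws.String("source-bucket/"),
+ //            },
+ //        },
+ //    })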
+ // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. 
For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. 
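+ // A hedged sketch of a complete notification configuration (bucket and queue
+ // ARN are illustrative):
+ //
+ //    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+ //        Bucket: aws.String("example-bucket"),
+ //        NotificationConfiguration: &s3.NotificationConfiguration{
+ //            QueueConfigurations: []*s3.QueueConfiguration{{
+ //                QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+ //                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+ //            }},
+ //        },
+ //    })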
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. 
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. 
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The container element for object lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an object lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // The object lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. +func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this object lock retention expires. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an object lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. 
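+// A sketch of applying a bucket-wide default retention (values are illustrative;
+// object lock must have been enabled when the bucket was created):
+//
+//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
+//            ObjectLockEnabled: aws.String("Enabled"),
+//            Rule: &s3.ObjectLockRule{
+//                DefaultRetention: &s3.DefaultRetention{
+//                    Mode: aws.String("GOVERNANCE"),
+//                    Days: aws.Int64(30),
+//                },
+//            },
+//        },
+//    })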
+func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. 
+ JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. 
+ BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v + return s +} + +// The ProgressEvent is and event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// Specifies the Block Public Access configuration for an Amazon S3 bucket. +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. 
+ IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS services and authorized users within this account if the bucket has a + // public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. 
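Illustrative usage (editor's sketch, not part of the vendored diff): the block below shows how a caller outside this package might compose the PublicAccessBlockConfiguration defined above with its generated chained setters. The session setup, the PutPublicAccessBlockInput type, and the PutPublicAccessBlock client method are assumed to exist elsewhere in this SDK; the bucket name is hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build an S3 client from the default credential/config chain.
	svc := s3.New(session.Must(session.NewSession()))

	// Block all four categories of public access for a (hypothetical) bucket.
	cfg := (&s3.PublicAccessBlockConfiguration{}).
		SetBlockPublicAcls(true).
		SetBlockPublicPolicy(true).
		SetIgnorePublicAcls(true).
		SetRestrictPublicBuckets(true)

	// PutPublicAccessBlockInput and PutPublicAccessBlock are assumed to be
	// defined elsewhere in this package.
	input := (&s3.PutPublicAccessBlockInput{}).
		SetBucket("example-bucket").
		SetPublicAccessBlockConfiguration(cfg)

	if _, err := svc.PutPublicAccessBlock(input); err != nil {
		log.Fatal(err)
	}
}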
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. 
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the default server-side-encryption configuration. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
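Illustrative usage (editor's sketch, not part of the vendored diff): a minimal example of wiring a CORS rule into the PutBucketCorsInput defined above. The CORSConfiguration and CORSRule types and their setters are assumed to be defined elsewhere in this file; the client, bucket name, and origin are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// A single CORS rule allowing browser GETs from one (hypothetical) origin.
	rule := (&s3.CORSRule{}).
		SetAllowedMethods(aws.StringSlice([]string{"GET"})).
		SetAllowedOrigins(aws.StringSlice([]string{"https://example.com"})).
		SetAllowedHeaders(aws.StringSlice([]string{"*"})).
		SetMaxAgeSeconds(3000)

	input := (&s3.PutBucketCorsInput{}).
		SetBucket("example-bucket").
		SetCORSConfiguration((&s3.CORSConfiguration{}).
			SetCORSRules([]*s3.CORSRule{rule}))

	if _, err := svc.PutBucketCors(input); err != nil {
		log.Fatal(err)
	}
}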
+func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
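Illustrative usage (editor's sketch, not part of the vendored diff): a minimal example of requesting SSE-S3 default encryption through the PutBucketEncryptionInput defined above. ServerSideEncryptionConfiguration, ServerSideEncryptionRule, ServerSideEncryptionByDefault, and the ServerSideEncryptionAes256 constant are assumed to be defined elsewhere in this file; the client and bucket name are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Default every new object in the bucket to SSE-S3 (AES-256) encryption.
	rule := (&s3.ServerSideEncryptionRule{}).
		SetApplyServerSideEncryptionByDefault((&s3.ServerSideEncryptionByDefault{}).
			SetSSEAlgorithm(s3.ServerSideEncryptionAes256))

	input := (&s3.PutBucketEncryptionInput{}).
		SetBucket("example-bucket").
		SetServerSideEncryptionConfiguration((&s3.ServerSideEncryptionConfiguration{}).
			SetRules([]*s3.ServerSideEncryptionRule{rule}))

	if _, err := svc.PutBucketEncryption(input); err != nil {
		log.Fatal(err)
	}
}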
+func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. + // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) + // in the Amazon Simple Storage Service Developer Guide. + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
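Illustrative usage (editor's sketch, not part of the vendored diff): one way an expiration rule might be attached through the PutBucketLifecycleConfigurationInput defined above (the non-deprecated variant). BucketLifecycleConfiguration, LifecycleRule, LifecycleRuleFilter, LifecycleExpiration, and the ExpirationStatusEnabled constant are assumed to be defined elsewhere in this file; the prefix, rule ID, 30-day window, and bucket name are illustrative.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Expire objects under the (hypothetical) "tmp/" prefix after 30 days.
	rule := (&s3.LifecycleRule{}).
		SetID("expire-tmp").
		SetStatus(s3.ExpirationStatusEnabled).
		SetFilter((&s3.LifecycleRuleFilter{}).SetPrefix("tmp/")).
		SetExpiration((&s3.LifecycleExpiration{}).SetDays(30))

	input := (&s3.PutBucketLifecycleConfigurationInput{}).
		SetBucket("example-bucket").
		SetLifecycleConfiguration((&s3.BucketLifecycleConfiguration{}).
			SetRules([]*s3.LifecycleRule{rule}))

	if _, err := svc.PutBucketLifecycleConfiguration(input); err != nil {
		log.Fatal(err)
	}
}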
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. 
+ // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token that allows Amazon S3 object lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. +func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetTagging sets the Tagging field's value. 
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Describes the versioning state of an Amazon S3 bucket. For more information, + // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) + // in the Amazon Simple Storage Service API Reference. + // + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. 
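Illustrative usage (editor's sketch, not part of the vendored diff): enabling versioning through the PutBucketVersioningInput defined above. VersioningConfiguration and the BucketVersioningStatusEnabled constant are assumed to be defined elsewhere in this file; the MFA header is left unset and the bucket name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Turn on versioning; MFA delete is not configured in this sketch.
	input := (&s3.PutBucketVersioningInput{}).
		SetBucket("example-bucket").
		SetVersioningConfiguration((&s3.VersioningConfiguration{}).
			SetStatus(s3.BucketVersioningStatusEnabled))

	if _, err := svc.PutBucketVersioning(input); err != nil {
		log.Fatal(err)
	}
}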
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies website configuration parameters for an Amazon S3 bucket. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. 
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetGrantFullControl sets the GrantFullControl field's value. 
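Illustrative usage (editor's sketch, not part of the vendored diff): applying a canned ACL to an existing object through the PutObjectAclInput defined above, using only the setters generated for it. The client call, bucket, and key are placeholders; ObjectCannedACLPublicRead is one of the enum constants assumed to be declared elsewhere in this file.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Make one (hypothetical) object publicly readable via a canned ACL.
	input := (&s3.PutObjectAclInput{}).
		SetBucket("example-bucket").
		SetKey("reports/2019/summary.csv").
		SetACL(s3.ObjectCannedACLPublicRead)

	if _, err := svc.PutObjectAcl(input); err != nil {
		log.Fatal(err)
	}
}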
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. 
This parameter is required
+	// if object lock parameters are specified.
+	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the PUT operation was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// The Legal Hold status that you want to apply to the specified object.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// The object lock mode that you want to apply to this object.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when you want this object's object lock to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
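+	//
+	// As a rough sketch (assuming the raw key bytes are already at hand; this is
+	// not part of the generated documentation), the digest can be derived with the
+	// standard library before being set on the input:
+	//
+	//   sum := md5.Sum(rawKey)                              // crypto/md5; rawKey holds the raw key bytes
+	//   digest := base64.StdEncoding.EncodeToString(sum[:]) // encoding/base64
+	//   input.SetSSECustomerKeyMD5(digest)                  // input is a *PutObjectInput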
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. 
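+//
+// Before the individual setters that follow, a hedged usage sketch (bucket, key,
+// and body values are assumptions) of chaining them on a PutObjectInput:
+//
+//   input := &PutObjectInput{}
+//   input.SetBucket("example-bucket").
+//       SetKey("example/object.txt").
+//       SetBody(strings.NewReader("hello world")). // strings assumed imported; any io.ReadSeeker works
+//       SetContentType("text/plain").
+//       SetCacheControl("max-age=86400")
+//   if err := input.Validate(); err != nil {
+//       // handle missing Bucket/Key before sending the request
+//   }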
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket containing the object that you want to place a Legal Hold on. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the Legal Hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a Legal Hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
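+//
+// A minimal sketch (bucket, key, and status values are assumptions) of building
+// this input with the fluent setters and validating it before the request is sent:
+//
+//   input := &PutObjectLegalHoldInput{}
+//   input.SetBucket("example-bucket").
+//       SetKey("example-object").
+//       SetLegalHold(&ObjectLockLegalHold{Status: aws.String("ON")})
+//   if err := input.Validate(); err != nil {
+//       // required parameters are missing or too short
+//   }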
+func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose object lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The object lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Amazon S3 object lock to be enabled for an existing bucket. 
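+	//
+	// A hedged sketch (bucket name, token, and the enabled flag are assumptions)
+	// of how this input might be assembled when turning on object lock for an
+	// existing bucket:
+	//
+	//   input := &PutObjectLockConfigurationInput{}
+	//   input.SetBucket("example-bucket").
+	//       SetObjectLockConfiguration(&ObjectLockConfiguration{
+	//           ObjectLockEnabled: aws.String("Enabled"),
+	//       }).
+	//       SetToken("token-returned-by-s3")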
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +type PutObjectRetentionInput struct { + _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` + + // The bucket that contains the object you want to apply this Object Retention + // configuration to. 
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates whether this operation should bypass Governance-mode restrictions.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// The key name for the object that you want to apply this Object Retention
+	// configuration to.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// The container element for the Object Retention configuration.
+	Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The version ID for the object that you want to apply this Object Retention
+	// configuration to.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectRetentionInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectRetentionInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectRetentionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutObjectRetentionInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value.
+func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput {
+	s.BypassGovernanceRetention = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput {
+	s.Key = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// SetRetention sets the Retention field's value.
+func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput {
+	s.Retention = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
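+//
+// Alongside the version setter below, a rough sketch (the mode and retention
+// window are assumptions) of the Retention payload this input usually carries:
+//
+//   input := &PutObjectRetentionInput{}
+//   input.SetRetention(&ObjectLockRetention{
+//       Mode:            aws.String("GOVERNANCE"),
+//       RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//   })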
+func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. 
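+//
+// As a short sketch (bucket, key, and the tag pair are assumptions), the required
+// Tagging payload for this input can be built like so:
+//
+//   input := &PutObjectTaggingInput{}
+//   input.SetBucket("example-bucket").
+//       SetKey("example-object").
+//       SetTagging(&Tagging{TagSet: []*Tag{
+//           {Key: aws.String("environment"), Value: aws.String("dev")},
+//       }})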
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. 
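+//
+// A minimal sketch (choosing all four options is an assumption about a typical
+// lock-down, not generated documentation) of the configuration this setter expects:
+//
+//   pab := &PublicAccessBlockConfiguration{}
+//   pab.SetBlockPublicAcls(true)
+//   pab.SetBlockPublicPolicy(true)
+//   pab.SetIgnorePublicAcls(true)
+//   pab.SetRestrictPublicBuckets(true)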
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. 
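+	//
+	// For the non-deprecated QueueConfiguration above, a minimal sketch (the queue
+	// ARN and event name are assumptions) looks roughly like:
+	//
+	//   qc := &QueueConfiguration{}
+	//   qc.SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue")
+	//   qc.SetEvents([]*string{aws.String("s3:ObjectCreated:*")})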
+	Id *string `type:"string"`
+
+	Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+	s.Event = &v
+	return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+	s.Events = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
+	return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+type RecordsEvent struct {
+	_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+	// The byte array of partial, one or more result records.
+	//
+	// Payload is automatically base64 encoded/decoded by the SDK.
+	Payload []byte `type:"blob"`
+}
+
+// String returns the string representation
+func (s RecordsEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RecordsEvent) GoString() string {
+	return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+	s.Payload = v
+	return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	s.Payload = make([]byte, len(msg.Payload))
+	copy(s.Payload, msg.Payload)
+	return nil
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use when redirecting requests. The default is the protocol that
+	// is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. 
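+	//
+	// A rough sketch (the role and destination bucket ARNs are assumptions) of a
+	// single-rule configuration built with these types:
+	//
+	//   cfg := &ReplicationConfiguration{}
+	//   cfg.SetRole("arn:aws:iam::123456789012:role/example-replication-role")
+	//   cfg.SetRules([]*ReplicationRule{{
+	//       Status:      aws.String("Enabled"),
+	//       Destination: &Destination{Bucket: aws.String("arn:aws:s3:::example-destination-bucket")},
+	//   }})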
+	//
+	// Rules is a required field
+	Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+	if s.Role == nil {
+		invalidParams.Add(request.NewErrParamRequired("Role"))
+	}
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+	s.Role = &v
+	return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+	s.Rules = v
+	return s
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether Amazon S3 should replicate delete markers.
+	DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
+
+	// A container for information about the replication destination.
+	//
+	// Destination is a required field
+	Destination *Destination `type:"structure" required:"true"`
+
+	// A filter that identifies the subset of objects to which the replication rule
+	// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+	Filter *ReplicationRuleFilter `type:"structure"`
+
+	// A unique identifier for the rule. The maximum value is 255 characters.
+	ID *string `type:"string"`
+
+	// An object keyname prefix that identifies the object or objects to which the
+	// rule applies. The maximum prefix length is 1,024 characters. To include all
+	// objects in a bucket, specify an empty string.
+	//
+	// Deprecated: Prefix has been deprecated
+	Prefix *string `deprecated:"true" type:"string"`
+
+	// The priority associated with the rule. If you specify multiple rules in a
+	// replication configuration, Amazon S3 prioritizes the rules to prevent conflicts
+	// when filtering. If two or more rules identify the same object based on a
+	// specified filter, the rule with higher priority takes precedence. For example:
+	//
+	// * Same object quality prefix-based filter criteria if prefixes you specified
+	// in multiple rules overlap
+	//
+	// * Same object qualify tag-based filter criteria specified in multiple
+	// rules
+	//
+	// For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+	// in the Amazon S3 Developer Guide.
+	Priority *int64 `type:"integer"`
+
+	// A container that describes additional filters for identifying the source
+	// objects that you want to replicate. You can choose to enable or disable the
+	// replication of these objects.
Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // an AWS KMS-Managed Key (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
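+//
+// As a hedged illustration (the prefix and tag values are assumptions), the And
+// operator is what a rule filter uses when it needs both a prefix and tags:
+//
+//   filter := &ReplicationRuleFilter{}
+//   filter.SetAnd(&ReplicationRuleAndOperator{
+//       Prefix: aws.String("logs/"),
+//       Tags:   []*Tag{{Key: aws.String("replicate"), Value: aws.String("true")}},
+//   })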
+func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object keyname prefix that identifies the subset of objects to which the + // rule applies. + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
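+	//
+	// A one-line sketch (the value is an assumption) of enabling requester pays:
+	//
+	//   rpc := &RequestPaymentConfiguration{}
+	//   rpc.SetPayer("Requester")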
+ // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
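+//
+// For context, a minimal sketch of a request that would pass validation; svc
+// (an *s3.S3 client), "my-bucket", and "archive/report.csv" are placeholder
+// names, and the 2-day lifetime and Standard tier are arbitrary example values:
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("archive/report.csv"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days:                 aws.Int64(2),
+//            GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String("Standard")},
+//        },
+//    })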
+func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. 
+ SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. 
+ // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. 
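+ //
+ // As a minimal sketch (the rule ID, prefix, and 30-day/GLACIER values are
+ // arbitrary placeholders), a lifecycle Rule using this transition might be
+ // built as:
+ //
+ //    rule := &s3.Rule{
+ //        ID:     aws.String("archive-logs"),
+ //        Prefix: aws.String("logs/"),
+ //        Status: aws.String("Enabled"),
+ //        Transition: &s3.Transition{
+ //            Days:         aws.Int64(30),
+ //            StorageClass: aws.String("GLACIER"),
+ //        },
+ //    }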
+ Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. 
+type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// SelectObjectContentEventStream provides handling of EventStreams for +// the SelectObjectContent API. +// +// Use this type to receive SelectObjectContentEventStream events. The events +// can be read from the Events channel member. +// +// The events that can be received are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStream struct { + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer +} + +// Close closes the EventStream. This will also cause the Events channel to be +// closed. You can use the closing of the Events channel to terminate your +// application's read from the API's EventStream. +// +// Will close the underlying EventStream reader. For EventStream over HTTP +// connection this will also close the HTTP connection. +// +// Close must be called when done using the EventStream API. Not calling Close +// may result in resource leaks. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.Reader.Close() + return es.Err() +} + +// Err returns any error that occurred while reading EventStream Events from +// the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.Reader.Err(); err != nil { + return err + } + es.StreamCloser.Close() + + return nil +} + +// Events returns a channel to read EventStream Events from the +// SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events read from the SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() +} + +// SelectObjectContentEventStreamReader provides the interface for reading EventStream +// Events from the SelectObjectContent API. The +// default implementation for this interface will be SelectObjectContentEventStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will close the underlying event stream reader. 
For event stream over + // HTTP this will also close the HTTP connection. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + } + + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) + + return r +} + +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. +func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + + return r.Err() +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) + } +} + +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) + } + + return nil +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + r.errVal.Store(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + + case "End": + return &EndEvent{}, nil + + case "Progress": + return &ProgressEvent{}, nil + + case "Records": + return &RecordsEvent{}, nil + + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. 
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The expression that is used to query the object.
+ //
+ // Expression is a required field
+ Expression *string `type:"string" required:"true"`
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // ExpressionType is a required field
+ ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
+
+ // Describes the format of the data in the object that is being queried.
+ //
+ // InputSerialization is a required field
+ InputSerialization *InputSerialization `type:"structure" required:"true"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Describes the format of the data that you want Amazon S3 to return in response.
+ //
+ // OutputSerialization is a required field
+ OutputSerialization *OutputSerialization `type:"structure" required:"true"`
+
+ // Specifies if periodic request progress information should be enabled.
+ RequestProgress *RequestProgress `type:"structure"`
+
+ // The SSE Algorithm used to encrypt the object. For more information, see Server-Side
+ // Encryption (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // The SSE Customer Key. For more information, see Server-Side Encryption (Using
+ // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // The SSE Customer Key MD5. For more information, see Server-Side Encryption
+ // (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+}
+
+// String returns the string representation
+func (s SelectObjectContentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SelectObjectContentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s +} + +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} + +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + // Use EventStream to use the API's stream. 
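+ //
+ // A minimal usage sketch for reading the stream (svc, the bucket, key, and
+ // SQL text are placeholders; error handling elided):
+ //
+ //    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+ //        Bucket:              aws.String("my-bucket"),
+ //        Key:                 aws.String("data.csv"),
+ //        Expression:          aws.String("SELECT * FROM S3Object s"),
+ //        ExpressionType:      aws.String("SQL"),
+ //        InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{}},
+ //        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
+ //    })
+ //    if err == nil {
+ //        defer out.EventStream.Close()
+ //        for ev := range out.EventStream.Events() {
+ //            if records, ok := ev.(*s3.RecordsEvent); ok {
+ //                _ = records.Payload // process the returned bytes
+ //            }
+ //        }
+ //    }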
+ EventStream *SelectObjectContentEventStream `type:"structure"` +} + +// String returns the string representation +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} + +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return + } + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() + + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. 
+func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // KMS master key ID to use for the default encryption. This parameter is allowed + // if and only if SSEAlgorithm is set to aws:kms. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. 
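+//
+// A minimal sketch of the configuration this setter populates (the KMS key ARN
+// is a placeholder; "AES256" could be used instead of "aws:kms"):
+//
+//    cfg := &s3.ServerSideEncryptionConfiguration{
+//        Rules: []*s3.ServerSideEncryptionRule{{
+//            ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                SSEAlgorithm:   aws.String("aws:kms"),
+//                KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
+//            },
+//        }},
+//    }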
+func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` +} + +// String returns the string representation +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// an AWS KMS-Managed Key (SSE-KMS). +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. 
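+//
+// As a minimal sketch, a replication SourceSelectionCriteria that restricts a
+// rule to KMS-encrypted objects could be expressed as:
+//
+//    criteria := &s3.SourceSelectionCriteria{
+//        SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{Status: aws.String("Enabled")},
+//    }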
+type SseKmsEncryptedObjects struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 replicates objects created with server-side encryption
+ // using an AWS KMS-managed key.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
+}
+
+// String returns the string representation
+func (s SseKmsEncryptedObjects) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SseKmsEncryptedObjects) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SseKmsEncryptedObjects) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects {
+ s.Status = &v
+ return s
+}
+
+type Stats struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of uncompressed object bytes processed.
+ BytesProcessed *int64 `type:"long"`
+
+ // The total number of bytes of records payload data returned.
+ BytesReturned *int64 `type:"long"`
+
+ // The total number of object bytes scanned.
+ BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s Stats) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Stats) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+ s.BytesScanned = &v
+ return s
+}
+
+type StatsEvent struct {
+ _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+ // The Stats event details.
+ Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s StatsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StatsEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+ s.Details = v
+ return s
+}
+
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.
+type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. 
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type Tagging struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. 
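+//
+// A minimal sketch (the topic ARN is a placeholder) of a configuration that
+// publishes object-created events:
+//
+//    topicCfg := &s3.TopicConfiguration{
+//        TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:my-topic"),
+//        Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//    }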
+type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. 
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +type UploadPartCopyInput struct { + _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. 
You can copy a range only if the source object
+ // is greater than 5 MB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // PartNumber is a required field
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
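+ //
+ // As a minimal sketch (svc, the buckets, keys, and upload ID are placeholders),
+ // copying the first 5 MiB of a source object as part 1 might look like:
+ //
+ //    _, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+ //        Bucket:          aws.String("dest-bucket"),
+ //        Key:             aws.String("dest-key"),
+ //        UploadId:        aws.String("example-upload-id"),
+ //        PartNumber:      aws.Int64(1),
+ //        CopySource:      aws.String("source-bucket/source-key"),
+ //        CopySourceRange: aws.String("bytes=0-5242879"),
+ //    })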
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. 
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameted is required + // if object lock parameters are specified. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetKey sets the Key field's value. 
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website. + IndexDocument *IndexDocument `type:"structure"` + + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + 
// BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// The bucket event for which to send notifications. 
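+// Wildcard values such as "s3:ObjectCreated:*" match every event in that
+// group, while specific values such as "s3:ObjectCreated:Put" match a single
+// operation.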
+const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" +) + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = 
"IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" +) + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = 
"REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. 
+// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" +) + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 000000000..5c8ce5cc8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,249 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + 
"hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + if _, err := copySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation set +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provide header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := copySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. 
+ n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} + +// Adds the x-amz-te: append_md5 header to the request. This requests the service +// responds with a trailing MD5 checksum. +// +// Will not ask for append MD5 if disabled, the request is presigned or, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength hiden the trailing MD5 checksum. + tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 000000000..9ba8a7887 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,107 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var 
reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 000000000..23d386b16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,75 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(updateEndpointForS3Config) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) +} + +func defaultInitRequestFn(r *request.Request) { + // Add reuest handlers for specific platforms. + // e.g. 100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, + opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, + opPutBucketReplication: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. 
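+	// The commented-out opGetObject case below would request an appended
+	// MD5 trailer and verify it on download via askForTxEncodingAppendMD5
+	// and useMD5ValidationReader (see body_hash.go).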
+ // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + } +} + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 000000000..0def02255 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 000000000..4b65f7153 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,123 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports both +// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrently downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Automatic URI cleaning +// +// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname) +// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct +// used by the service client. +// +// svc := s3.New(sess, &aws.Config{ +// DisableRestProtocolURICleaning: aws.Bool(true), +// }) +// out, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String("bucketname"), +// Key: aws.String("//foo//bar//moo"), +// }) +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in. +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// sess := session.Must(session.NewSession()) +// +// // Create the decryption client. +// svc := s3crypto.NewDecryptionClient(sess) +// +// // The object will be downloaded from S3 and decrypted locally. By metadata +// // about the object's encryption will instruct the decryption client how +// // decrypt the content of the object. By default KMS is used for keys. +// result, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String(myBucket), +// Key: aws.String(myKey), +// }) +// +// See the s3crypto package documentation for more information. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 000000000..931cb17bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,48 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Please select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY operation is not in the active tier and is + // only stored in Amazon Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 000000000..a7fbc2de2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,155 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Request handler to automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." 
+func updateEndpointForS3Config(r *request.Request) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r) + } +} + +func updateEndpointForHostStyle(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + +func updateEndpointForAccelerate(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), + nil) + return + } + + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterExecption", + fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. 
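To make the addressing rules above concrete, here is a hedged sketch under the usual aws, session, and s3 imports; the bucket and key names are placeholders. A bucket whose name contains dots is kept out of the host over HTTPS, and a caller can make the behavior explicit by forcing path-style requests through the aws.Config field consulted by this handler:

	// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
	// "github.com/aws/aws-sdk-go/aws/session", "github.com/aws/aws-sdk-go/service/s3".
	func getFromDottedBucket() error {
		sess := session.Must(session.NewSession())

		// Force path-style addressing (https://s3.<region>.amazonaws.com/<bucket>/<key>)
		// instead of moving the bucket name into the host.
		svc := s3.New(sess, &aws.Config{
			S3ForcePathStyle: aws.Bool(true),
		})

		out, err := svc.GetObject(&s3.GetObjectInput{
			Bucket: aws.String("my.dotted.bucket"), // placeholder bucket name
			Key:    aws.String("example/object.txt"),
		})
		if err != nil {
			return fmt.Errorf("failed to get object, %v", err)
		}
		defer out.Body.Close()
		return nil
	}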
+func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." + u.Host + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 000000000..8e6f3307d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 000000000..14d05f7b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 000000000..d17dcc9da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,99 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifer of a specific service. +) + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. 
+// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 000000000..b71c835de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,84 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } +} + +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() + } + + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() + } + + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if 
len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or set the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return + } + + // For backwards compatibility, the header's value is not base64 encoded, + // and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update Key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 000000000..f6a69aed1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,40 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + if body.Len() == 0 { + // If there is no body, don't attempt to parse the body. + return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 000000000..5b63fac72 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,88 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region.
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + msg := fmt.Sprintf( + "incorrect region, the bucket is not in '%s' region at endpoint '%s'", + aws.StringValue(r.Config.Region), + aws.StringValue(r.Config.Endpoint), + ) + if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { + msg += fmt.Sprintf(", bucket is in '%s' region", v) + } + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Attempt to parse error from body if it is known + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + if err == io.EOF { + // Only capture the error if an unmarshal error occurs that is not EOF, + // because S3 might send an error without a error message which causes + // the XML unmarshal to fail with EOF. + err = nil + } + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Fallback to status code converted to message if still no error code + if len(errResp.Code) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errResp.Code, errResp.Message, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formated error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. +type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debug, and contacting support + HostID() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 000000000..2596c694b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilBucketExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
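As a usage sketch of this waiter (the bucket name and override values are illustrative, and the standard aws, request, s3, and time imports are assumed), the variadic options can replace the generated defaults of 20 attempts with a 5 second constant delay:

	func waitForBucket(svc *s3.S3, bucket string) error {
		return svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
			&s3.HeadBucketInput{
				Bucket: aws.String(bucket),
			},
			// Optional overrides of the generated waiter defaults.
			request.WithWaiterMaxAttempts(40),
			request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
		)
	}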
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 000000000..eb0a6a417 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2750 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) +// in the IAM User Guide. +// +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. 
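To make the DurationSeconds parameter (and the SerialNumber/TokenCode parameters discussed further below) concrete, here is a hedged usage sketch; the role ARN, session name, and MFA values are placeholders, and the usual aws, session, and sts imports are assumed:

	func assumeExampleRole() (*sts.AssumeRoleOutput, error) {
		svc := sts.New(session.Must(session.NewSession()))

		return svc.AssumeRole(&sts.AssumeRoleInput{
			RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder ARN
			RoleSessionName: aws.String("example-session"),
			DurationSeconds: aws.Int64(3600), // up to the role's maximum session duration
			// SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"), // only if the role's
			// TokenCode:    aws.String("123456"),                                     // trust policy requires MFA
		})
	}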
+// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: +// +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. 
The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA device produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. 
+// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. 
The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. 
+// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application. You also don't need to deploy +// server-based proxy services that use long-term AWS credentials. Instead, +// the identity of the caller is validated by using a token from the web identity +// provider. For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary Security +// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service API operations. +// +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. 
To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). 
+// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. +// +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers, and then how to use the information from these providers to +// get and use temporary security credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
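A hedged usage sketch of the operation documented above (the role ARN is a placeholder, token must be an identity token obtained from the web identity provider, and no long-term AWS credentials are needed for the call itself):

	func assumeWithWebIdentity(svc *sts.STS, token string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
		return svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
			RoleArn:          aws.String("arn:aws:iam::123456789012:role/example-web-role"), // placeholder ARN
			RoleSessionName:  aws.String("example-web-session"),
			WebIdentityToken: aws.String(token), // OIDC/OAuth token from the provider
		})
	}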
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. +// +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. 
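+//
+// A minimal, hypothetical sketch of decoding such a message (the encodedMsg
+// variable stands in for a message returned by a failed AWS call and is an
+// assumption for illustration, not part of this generated API):
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage))
+//    }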
+// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the operation +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetAccessKeyInfo = "GetAccessKeyInfo" + +// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyInfo operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
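+//
+// A hypothetical sketch of injecting a custom header before sending (the
+// header name and value are assumptions used only for illustration):
+//
+//    req, resp := client.GetAccessKeyInfoRequest(params)
+//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "example-123")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(resp)
+//    }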
+// +// +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { + op := &request.Operation{ + Name: opGetAccessKeyInfo, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyInfoInput{} + } + + output = &GetAccessKeyInfoOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyInfo API operation for AWS Security Token Service. +// +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) +// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). +// For more information about access keys, see Managing Access Keys for IAM +// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// AWS account to which the keys belong. Access key IDs beginning with AKIA +// are long-term credentials for an IAM user or the AWS account root user. Access +// key IDs beginning with ASIA are temporary credentials that are created using +// STS operations. If the account in the response belongs to you, you can sign +// in as the root user and review your root user access keys. Then, you can +// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// +// This operation does not indicate the state of the access key. The key might +// be active, inactive, or deleted. Active keys might not have permissions to +// perform an operation. Providing a deleted access key might return an error +// that the key doesn't exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetAccessKeyInfo for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo +func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + return out, req.Send() +} + +// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyInfo for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
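+//
+// A hypothetical sketch of calling this method with a request timeout (the
+// access key ID is the standard documentation example value, and svc is an
+// assumed *sts.STS client):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.GetAccessKeyInfoWithContext(ctx, &sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account))
+//    }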
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) { + req, out := c.GetAccessKeyInfoRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM user or role whose credentials are used to +// call the operation. +// +// No permissions are required to perform this operation. If an administrator +// adds a policy to your IAM user or role that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when an IAM user +// or role is denied access. To view an example response, see I Am Not Authorized +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). 
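+//
+// A minimal, hypothetical sketch of a proxy application requesting federated
+// credentials (the user name, duration, and inline session policy are
+// illustrative assumptions):
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("example-federated-user"),
+//        DurationSeconds: aws.Int64(3600),
+//        Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
+//    }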
+// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// is 43,200 seconds (12 hours). Temporary credentials that are obtained by +// using AWS account root user credentials have a maximum duration of 3,600 +// seconds (1 hour). +// +// The temporary security credentials created by GetFederationToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot use these credentials to call any IAM API operations. +// +// * You cannot call any STS API operations except GetCallerIdentity. +// +// Permissions +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. The only exception +// is when the credentials are used to access a resource that has a resource-based +// policy that specifically references the federated user session in the Principal +// element of the policy. When you pass session policies, the session permissions +// are the intersection of the IAM user policies and the session policies that +// you pass. This gives you a way to further restrict the permissions for a +// federated user. You cannot use session policies to grant more permissions +// than those that are defined in the permissions policy of the IAM user. For +// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. 
The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. 
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. +// +// The temporary security credentials created by GetSessionToken can be used +// to make API calls to any AWS service with the following exceptions: +// +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. +// +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// by creating one or more IAM users, giving them the necessary permissions, +// and using IAM users for everyday interaction with AWS. +// +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. +// +// For more information about using GetSessionToken to create temporary credentials, +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetSessionToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AssumeRoleInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. 
For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. 
You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
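+//
+// A hypothetical sketch of building an input with the setters above and
+// checking it before sending (the role ARN and session name are placeholders):
+//
+//    in := (&sts.AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/example-role").
+//        SetRoleSessionName("example-session").
+//        SetDurationSeconds(3600)
+//    if err := in.Validate(); err != nil {
+//        // err describes the invalid or missing parameters
+//    }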
+func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. 
You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. 
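+	//
+	// A hypothetical sketch of producing this value from a raw SAMLResponse
+	// XML document (samlResponseXML is an assumption for illustration):
+	//
+	//    assertion := base64.StdEncoding.EncodeToString([]byte(samlResponseXML))
+	//    input.SetSAMLAssertion(assertion)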
+ // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. 
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. 
You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. 
+ // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. 
+type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. 
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. 
+func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. 
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercased letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the AWS account. + Account *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. 
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. 
However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies shouldn't exceed 2048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. 
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. 
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 000000000..d5307fcaa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 000000000..fcb720dca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. 
+// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// in the IAM User Guide. +// +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. 
+// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. +// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 000000000..41ea09c35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". 
+ // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..185c914d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
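+//
+// For this package, customizations.go registers customizeRequest as the
+// initRequest hook; it appends ErrCodeIDPCommunicationErrorException to the
+// request's RetryErrorCodes so that transient identity-provider communication
+// failures are retried.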
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 000000000..e2e1d6efe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls for unit testing your code with the SDK, without needing to inject
+// custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It is suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
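+//
+// The compile-time assertion at the bottom of this file,
+// var _ STSAPI = (*sts.STS)(nil), verifies that *sts.STS continues to satisfy
+// this interface.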
+type STSAPI interface { + AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) + AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput) + + AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error) + AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput) + + AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error) + AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput) + + DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error) + DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput) + + GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error) + GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput) + + GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error) + GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput) + + GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error) + GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error) + GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput) + + GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) + GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error) + GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput) +} + +var _ STSAPI = (*sts.STS)(nil) diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE new file mode 100644 index 000000000..aade9a58b --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/LICENSE @@ -0,0 +1,20 @@ +Original version Copyright © 2010 Fazlul Shahriar . Newer +portions Copyright © 2014 Blake Gentry . 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go new file mode 100644 index 000000000..ea49987c0 --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go @@ -0,0 +1,510 @@ +package netrc + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type tkType int + +const ( + tkMachine tkType = iota + tkDefault + tkLogin + tkPassword + tkAccount + tkMacdef + tkComment + tkWhitespace +) + +var keywords = map[string]tkType{ + "machine": tkMachine, + "default": tkDefault, + "login": tkLogin, + "password": tkPassword, + "account": tkAccount, + "macdef": tkMacdef, + "#": tkComment, +} + +type Netrc struct { + tokens []*token + machines []*Machine + macros Macros + updateLock sync.Mutex +} + +// FindMachine returns the Machine in n named by name. If a machine named by +// name exists, it is returned. If no Machine with name name is found and there +// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil +// is returned. +func (n *Netrc) FindMachine(name string) (m *Machine) { + // TODO(bgentry): not safe for concurrency + var def *Machine + for _, m = range n.machines { + if m.Name == name { + return m + } + if m.IsDefault() { + def = m + } + } + if def == nil { + return nil + } + return def +} + +// MarshalText implements the encoding.TextMarshaler interface to encode a +// Netrc into text format. +func (n *Netrc) MarshalText() (text []byte, err error) { + // TODO(bgentry): not safe for concurrency + for i := range n.tokens { + switch n.tokens[i].kind { + case tkComment, tkDefault, tkWhitespace: // always append these types + text = append(text, n.tokens[i].rawkind...) + default: + if n.tokens[i].value != "" { // skip empty-value tokens + text = append(text, n.tokens[i].rawkind...) + } + } + if n.tokens[i].kind == tkMacdef { + text = append(text, ' ') + text = append(text, n.tokens[i].macroName...) + } + text = append(text, n.tokens[i].rawvalue...) 
+ } + return +} + +func (n *Netrc) NewMachine(name, login, password, account string) *Machine { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + prefix := "\n" + if len(n.tokens) == 0 { + prefix = "" + } + m := &Machine{ + Name: name, + Login: login, + Password: password, + Account: account, + + nametoken: &token{ + kind: tkMachine, + rawkind: []byte(prefix + "machine"), + value: name, + rawvalue: []byte(" " + name), + }, + logintoken: &token{ + kind: tkLogin, + rawkind: []byte("\n\tlogin"), + value: login, + rawvalue: []byte(" " + login), + }, + passtoken: &token{ + kind: tkPassword, + rawkind: []byte("\n\tpassword"), + value: password, + rawvalue: []byte(" " + password), + }, + accounttoken: &token{ + kind: tkAccount, + rawkind: []byte("\n\taccount"), + value: account, + rawvalue: []byte(" " + account), + }, + } + n.insertMachineTokensBeforeDefault(m) + for i := range n.machines { + if n.machines[i].IsDefault() { + n.machines = append(append(n.machines[:i], m), n.machines[i:]...) + return m + } + } + n.machines = append(n.machines, m) + return m +} + +func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) { + newtokens := []*token{m.nametoken} + if m.logintoken.value != "" { + newtokens = append(newtokens, m.logintoken) + } + if m.passtoken.value != "" { + newtokens = append(newtokens, m.passtoken) + } + if m.accounttoken.value != "" { + newtokens = append(newtokens, m.accounttoken) + } + for i := range n.tokens { + if n.tokens[i].kind == tkDefault { + // found the default, now insert tokens before it + n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...) + return + } + } + // didn't find a default, just add the newtokens to the end + n.tokens = append(n.tokens, newtokens...) + return +} + +func (n *Netrc) RemoveMachine(name string) { + n.updateLock.Lock() + defer n.updateLock.Unlock() + + for i := range n.machines { + if n.machines[i] != nil && n.machines[i].Name == name { + m := n.machines[i] + for _, t := range []*token{ + m.nametoken, m.logintoken, m.passtoken, m.accounttoken, + } { + n.removeToken(t) + } + n.machines = append(n.machines[:i], n.machines[i+1:]...) + return + } + } +} + +func (n *Netrc) removeToken(t *token) { + if t != nil { + for i := range n.tokens { + if n.tokens[i] == t { + n.tokens = append(n.tokens[:i], n.tokens[i+1:]...) + return + } + } + } +} + +// Machine contains information about a remote machine. +type Machine struct { + Name string + Login string + Password string + Account string + + nametoken *token + logintoken *token + passtoken *token + accounttoken *token +} + +// IsDefault returns true if the machine is a "default" token, denoted by an +// empty name. +func (m *Machine) IsDefault() bool { + return m.Name == "" +} + +// UpdatePassword sets the password for the Machine m. +func (m *Machine) UpdatePassword(newpass string) { + m.Password = newpass + updateTokenValue(m.passtoken, newpass) +} + +// UpdateLogin sets the login for the Machine m. +func (m *Machine) UpdateLogin(newlogin string) { + m.Login = newlogin + updateTokenValue(m.logintoken, newlogin) +} + +// UpdateAccount sets the login for the Machine m. 
+func (m *Machine) UpdateAccount(newaccount string) { + m.Account = newaccount + updateTokenValue(m.accounttoken, newaccount) +} + +func updateTokenValue(t *token, value string) { + oldvalue := t.value + t.value = value + newraw := make([]byte, len(t.rawvalue)) + copy(newraw, t.rawvalue) + t.rawvalue = append( + bytes.TrimSuffix(newraw, []byte(oldvalue)), + []byte(value)..., + ) +} + +// Macros contains all the macro definitions in a netrc file. +type Macros map[string]string + +type token struct { + kind tkType + macroName string + value string + rawkind []byte + rawvalue []byte +} + +// Error represents a netrc file parse error. +type Error struct { + LineNum int // Line number + Msg string // Error message +} + +// Error returns a string representation of error e. +func (e *Error) Error() string { + return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg) +} + +func (e *Error) BadDefaultOrder() bool { + return e.Msg == errBadDefaultOrder +} + +const errBadDefaultOrder = "default token must appear after all machine tokens" + +// scanLinesKeepPrefix is a split function for a Scanner that returns each line +// of text. The returned token may include newlines if they are before the +// first non-space character. The returned line may be empty. The end-of-line +// marker is one optional carriage return followed by one mandatory newline. In +// regular expression notation, it is `\r?\n`. The last non-empty line of +// input will be returned even if it has no newline. +func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + // Skip leading spaces. + start := 0 + for width := 0; start < len(data); start += width { + var r rune + r, width = utf8.DecodeRune(data[start:]) + if !unicode.IsSpace(r) { + break + } + } + if i := bytes.IndexByte(data[start:], '\n'); i >= 0 { + // We have a full newline-terminated line. + return start + i, data[0 : start+i], nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// scanWordsKeepPrefix is a split function for a Scanner that returns each +// space-separated word of text, with prefixing spaces included. It will never +// return an empty string. The definition of space is set by unicode.IsSpace. +// +// Adapted from bufio.ScanWords(). +func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) { + // Skip leading spaces. + start := 0 + for width := 0; start < len(data); start += width { + var r rune + r, width = utf8.DecodeRune(data[start:]) + if !unicode.IsSpace(r) { + break + } + } + if atEOF && len(data) == 0 || start == len(data) { + return len(data), data, nil + } + if len(data) > start && data[start] == '#' { + return scanLinesKeepPrefix(data, atEOF) + } + // Scan until space, marking end of word. + for width, i := 0, start; i < len(data); i += width { + var r rune + r, width = utf8.DecodeRune(data[i:]) + if unicode.IsSpace(r) { + return i, data[:i], nil + } + } + // If we're at EOF, we have a final, non-empty, non-terminated word. Return it. + if atEOF && len(data) > start { + return len(data), data, nil + } + // Request more data. 
+ return 0, nil, nil +} + +func newToken(rawb []byte) (*token, error) { + _, tkind, err := bufio.ScanWords(rawb, true) + if err != nil { + return nil, err + } + var ok bool + t := token{rawkind: rawb} + t.kind, ok = keywords[string(tkind)] + if !ok { + trimmed := strings.TrimSpace(string(tkind)) + if trimmed == "" { + t.kind = tkWhitespace // whitespace-only, should happen only at EOF + return &t, nil + } + if strings.HasPrefix(trimmed, "#") { + t.kind = tkComment // this is a comment + return &t, nil + } + return &t, fmt.Errorf("keyword expected; got " + string(tkind)) + } + return &t, nil +} + +func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { + if scanner.Scan() { + raw := scanner.Bytes() + pos += bytes.Count(raw, []byte{'\n'}) + return raw, strings.TrimSpace(string(raw)), pos, nil + } + if err := scanner.Err(); err != nil { + return nil, "", pos, &Error{pos, err.Error()} + } + return nil, "", pos, nil +} + +func parse(r io.Reader, pos int) (*Netrc, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} + + defaultSeen := false + var currentMacro *token + var m *Machine + var t *token + scanner := bufio.NewScanner(bytes.NewReader(b)) + scanner.Split(scanTokensKeepPrefix) + + for scanner.Scan() { + rawb := scanner.Bytes() + if len(rawb) == 0 { + break + } + pos += bytes.Count(rawb, []byte{'\n'}) + t, err = newToken(rawb) + if err != nil { + if currentMacro == nil { + return nil, &Error{pos, err.Error()} + } + currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) + continue + } + + if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) { + // if macro rawvalue + rawb would contain \n\n, then macro def is over + currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n") + nrc.macros[currentMacro.macroName] = currentMacro.value + currentMacro = nil + } + + switch t.kind { + case tkMacdef: + if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + currentMacro = t + case tkDefault: + if defaultSeen { + return nil, &Error{pos, "multiple default token"} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + m.Name = "" + defaultSeen = true + case tkMachine: + if defaultSeen { + return nil, &Error{pos, errBadDefaultOrder} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Name + m.nametoken = t + case tkLogin: + if m == nil || m.Login != "" { + return nil, &Error{pos, "unexpected token login "} + } + if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Login + m.logintoken = t + case tkPassword: + if m == nil || m.Password != "" { + return nil, &Error{pos, "unexpected token password"} + } + if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Password + m.passtoken = t + case tkAccount: + if m == nil || m.Account != "" { + return nil, &Error{pos, "unexpected token account"} + } + if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Account + m.accounttoken = t + } + + nrc.tokens = append(nrc.tokens, t) + } + + if err := scanner.Err(); 
err != nil { + return nil, err + } + + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + return &nrc, nil +} + +// ParseFile opens the file at filename and then passes its io.Reader to +// Parse(). +func ParseFile(filename string) (*Netrc, error) { + fd, err := os.Open(filename) + if err != nil { + return nil, err + } + defer fd.Close() + return Parse(fd) +} + +// Parse parses from the the Reader r as a netrc file and returns the set of +// machine information and macros defined in it. The ``default'' machine, +// which is intended to be used when no machine name matches, is identified +// by an empty machine name. There can be only one ``default'' machine. +// +// If there is a parsing error, an Error is returned. +func Parse(r io.Reader) (*Netrc, error) { + return parse(r, 1) +} + +// FindMachine parses the netrc file identified by filename and returns the +// Machine named by name. If a problem occurs parsing the file at filename, an +// error is returned. If a machine named by name exists, it is returned. If no +// Machine with name name is found and there is a ``default'' machine, the +// ``default'' machine is returned. Otherwise, nil is returned. +func FindMachine(filename, name string) (m *Machine, err error) { + n, err := ParseFile(filename) + if err != nil { + return nil, err + } + return n.FindMachine(name), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 000000000..9e1311461 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE new file mode 100644 index 000000000..37d60fc35 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2017 Blake Gentry + +This license applies to the non-Windows portions of this library. The Windows +portion maintains its own Apache 2.0 license. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 000000000..ff177f612 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
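Stepping back to the go-netrc package vendored above: its exported Parse, ParseFile and FindMachine functions are the intended entry points. A minimal, hypothetical caller (the file path and machine name are illustrative only) might look like this:

package main

import (
	"fmt"
	"log"

	"github.com/bgentry/go-netrc/netrc"
)

func main() {
	// FindMachine parses the file and falls back to the ``default'' entry
	// when no machine name matches; it returns nil if neither exists.
	m, err := netrc.FindMachine("/home/user/.netrc", "example.com")
	if err != nil {
		log.Fatal(err)
	}
	if m == nil {
		log.Fatal("no entry for example.com and no default machine")
	}
	fmt.Println("login:", m.Login)
}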
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 000000000..fceda7518 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 000000000..71c1dd1b9 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. +func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. 
+ if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 000000000..d99fda191 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 000000000..c2093a809 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000..bc52e96f2 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000..792994785 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
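Before moving on to go-spew: the speakeasy package added above exposes Ask and FAsk for reading a line from stdin with terminal echo disabled. A small hypothetical caller (the prompt text is illustrative) is sketched below:

package main

import (
	"fmt"
	"log"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask writes the prompt to stdout, then reads a line from stdin with
	// echo turned off, so the password is never shown on the terminal.
	password, err := speakeasy.Ask("Vault password: ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d characters\n", len(password))
}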
+ +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. 
+ flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 000000000..205c28d68 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000..1be8ce945 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? 
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000..2e3d22f31 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000..aacaac6f1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000..f78d89fc1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. 
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000..b04edb7d7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 000000000..32c0e3388 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml new file mode 100644 index 000000000..95f8a1ff5 --- /dev/null +++ b/vendor/github.com/fatih/color/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.8.x + - tip + diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock new file mode 100644 index 000000000..7d879e9ca --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 000000000..ff1617f71 --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..3fc954460 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,179 @@ +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) + + + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! 
The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +Note that the `vendor` folder is here for stability. Remove the folder if you +already have the dependencies in your GOPATH. + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. 
the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..91c8e9f06 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user setted action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. 
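To make the SGR plumbing above concrete: `New(FgRed, Bold)` stores the parameters 31 and 1, `sequence()` joins them as `"31;1"`, and `wrap()` brackets the text with `\x1b[31;1m` and the reset sequence `\x1b[0m`. A quick check, forcing `NoColor` off so the wrapping is visible even when stdout is not a terminal (illustrative only, not part of the vendored package):

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// Force color on so wrap() is applied regardless of TTY detection.
	color.NoColor = false

	s := color.New(color.FgRed, color.Bold).Sprint("boom")
	fmt.Printf("%q\n", s) // "\x1b[31;1mboom\x1b[0m"
}
```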
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 000000000..cf1e96500 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,133 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several way, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However only for color.SprintXXX functions, user should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of an existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will be now bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definition. For example +suppose you have a CLI app and a `--no-color` bool flag. You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
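One more note on the color API before the protobuf sources below: `Add` appends attributes, and `Equals` in color.go compares the attribute sets without regard to order, so two `Color` values built along different paths compare equal. A small illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	a := color.New(color.FgRed, color.Bold)
	b := color.New(color.Bold).Add(color.FgRed)

	// Equals checks that both carry the same set of SGR attributes.
	fmt.Println(a.Equals(b)) // true
}
```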
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..0f646931a --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..3cd3249f7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
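`Merge` above first checks whether the destination implements the `Merger` interface and delegates to it, falling back to reflection only for plain generated structs. A minimal sketch of that fast path; the `counter` type is hand-written purely for illustration and is not a generated proto message:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// counter opts into the Merger fast path described in clone.go.
type counter struct {
	Values []int64
}

func (c *counter) Reset()         { *c = counter{} }
func (c *counter) String() string { return fmt.Sprint(c.Values) }
func (c *counter) ProtoMessage()  {}

// Merge implements proto.Merger: repeated elements are appended.
func (c *counter) Merge(src proto.Message) {
	if s, ok := src.(*counter); ok {
		c.Values = append(c.Values, s.Values...)
	}
}

func main() {
	dst := &counter{Values: []int64{1}}
	src := &counter{Values: []int64{2, 3}}

	proto.Merge(dst, src)   // dispatches to (*counter).Merge
	fmt.Println(dst.Values) // [1 2 3]
}
```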
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..63b0f08be --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
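`DecodeVarint` above is the package-level reader for base-128 varints; `EncodeVarint` and `SizeVarint` further down in this diff (encode.go) are its counterparts. A round trip for the classic example value 300, which encodes as `0xAC 0x02` (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	enc := proto.EncodeVarint(300)
	fmt.Printf("% x\n", enc) // ac 02

	x, n := proto.DecodeVarint(enc)
	fmt.Println(x, n)                  // 300 2
	fmt.Println(proto.SizeVarint(300)) // 2
}
```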
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
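The zigzag helpers map signed values of small magnitude onto small unsigned varints (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...), which keeps sint32/sint64 fields compact for negative numbers. A round-trip sketch; `EncodeZigzag64` appears in encode.go later in this diff, and `NewBuffer`/`Bytes` come from the package's lib.go, which is not part of this hunk:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// sint64(-3) zigzags to 5, which fits in a single varint byte.
	n := int64(-3)
	if err := b.EncodeZigzag64(uint64(n)); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b.Bytes()) // 05

	x, err := proto.NewBuffer(b.Bytes()).DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(x)) // -3
}
```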
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 000000000..dea2617ce --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..3abfed2cf --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..f9b6e41b3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,301 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. 
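Editor's note: a minimal sketch of the equality rules implemented above, reusing the example.Test message from the lib.go package documentation (placeholder pb import path):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // placeholder path, as in the lib.go package example
    )

    func main() {
        a := &pb.Test{Label: proto.String("hi"), Reps: []int64{1, 2, 3}}
        b := &pb.Test{Label: proto.String("hi"), Reps: []int64{1, 2, 3}}
        fmt.Println(proto.Equal(a, b)) // true: same set fields, element-wise equal repeats

        b.Reps = append(b.Reps, 4)
        fmt.Println(proto.Equal(a, b)) // false: repeated lengths differ

        fmt.Println(proto.Equal(&pb.Test{}, &pb.Test{})) // true: two unset fields are equal
    }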
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..fa88add30 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,607 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
+// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. + value interface{} + + // enc is the raw bytes for the extension field. + enc []byte +} + +// SetRawExtension is for testing only. 
+func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return extensionAsLegacyType(e.value), nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = extensionAsStorageType(v) + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return extensionAsLegacyType(e.value), nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. 
+// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
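Editor's note: a hedged sketch of the extension accessors defined above. pb.Base and pb.E_Note are hypothetical generated identifiers (a proto2 message with an extension range and a registered optional string extension), not part of this package.

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./base.pb" // hypothetical generated package
    )

    func main() {
        msg := &pb.Base{}

        // SetExtension stores the value in the message's extension map.
        if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
            log.Fatal(err)
        }
        if proto.HasExtension(msg, pb.E_Note) {
            v, err := proto.GetExtension(msg, pb.E_Note) // interface{} holding a *string here
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(*v.(*string)) // "hello"
        }
        proto.ClearExtension(msg, pb.E_Note)
    }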
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..fdd328bb7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,965 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + 
} + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
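Editor's note: a hedged sketch of reusing a Buffer with deterministic output enabled, again borrowing the example.Test message from the package documentation; Buffer.Marshal is the same method EncodeMessage calls above.

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // placeholder path, as in the lib.go package example
    )

    func main() {
        var buf proto.Buffer
        buf.SetDeterministic(true) // stable (but non-canonical) bytes for equal messages

        msg := &pb.Test{Label: proto.String("hi")}
        if err := buf.Marshal(msg); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("% x\n", buf.Bytes())

        buf.Reset() // reuse the same backing slice for the next message
    }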
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. 
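Editor's note: a short sketch of the enum helpers above, reusing the FOO maps from the generated-code example in the package documentation (placeholder pb import path):

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // placeholder path, as in the lib.go package example
    )

    func main() {
        fmt.Println(proto.EnumName(pb.FOO_name, int32(pb.FOO_X))) // "X"
        fmt.Println(proto.EnumName(pb.FOO_name, 99))              // "99": unknown values fall back to the number

        // Both the symbolic and the numeric JSON forms are accepted.
        v, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO")
        if err != nil {
            log.Fatal(err)
        }
        w, err := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`17`), "FOO")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(v, w) // 17 17
    }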
+func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. 
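Editor's note: for illustration, a hedged sketch of SetDefaults using the example.Test message from the package documentation, whose type field declares [default=77]:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // placeholder path, as in the lib.go package example
    )

    func main() {
        msg := &pb.Test{Label: proto.String("x")} // Type deliberately left unset

        proto.SetDefaults(msg)
        fmt.Println(*msg.Type)     // 77: the declared default was materialized into the struct
        fmt.Println(msg.GetType()) // 77: the getter would have applied the default anyway, without mutating
    }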
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. 
+func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..f48a75676 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. 
+ +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..94fa9194a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,360 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. 
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + if deref { + u = u.Elem() + } + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! 
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..dbfffe071 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,313 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + } + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. 
+ /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. 
+// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..a4b8c0cd3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. 
+ sort.Sort(prop) + + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. 
+func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..5cb11fa95 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2776 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. 
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } + sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + deref: deref, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return 
sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
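+// As the functions below show, every sizer takes (ptr pointer, tagsize int)
+// and returns the encoded size in bytes, and every marshaler takes
+// (b []byte, ptr pointer, wiretag uint64, deterministic bool) and returns the
+// extended buffer plus an error; for example, sizeFixed32Value reports
+// 4+tagsize and appendFixed32Value writes the tag varint followed by four
+// little-endian bytes.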
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
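+// Worked example for appendVarint above: v = 300 (0x12C) satisfies
+// 1<<7 <= v < 1<<14, so it is emitted as two bytes: 0xAC (the low seven bits
+// 0x2C with the continuation bit 0x80 set) followed by 0x02 (v>>7).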
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. 
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) 
+ continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) 
+} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 000000000..5525def6a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. 
+ merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..acee2fc52 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2053 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. 
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
+ for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + + // Get extension ranges, if any. + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 
0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..1aaee725b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,843 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. 
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..bb55a3af2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + 
switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. 
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 000000000..1ded05bbe --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2887 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. 
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: 
"LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{0} +} + +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{1} +} + +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2} +} + +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if 
m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4} +} + +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{5} +} + +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6} +} + +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{7} +} + +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{8} +} + +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{9} +} + +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript number. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18} +} + +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19} +} + +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20} +} + +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } + +var fileDescriptor_e5baabe45344a177 = []byte{ + // 2589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, + 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, + 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, + 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, + 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, + 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, + 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, + 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, + 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, + 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, + 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, + 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, + 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, + 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, + 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, + 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, + 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 
0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, + 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, + 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, + 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, + 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, + 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, + 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, + 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, + 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, + 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, + 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, + 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, + 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, + 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, + 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, + 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, + 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, + 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, + 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, + 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, + 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, + 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, + 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, + 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, + 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, + 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, + 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, + 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, + 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, + 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, + 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, + 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, + 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, + 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, + 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, + 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, + 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 
0xd6, 0x9d, 0x5b, + 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, + 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, + 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, + 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, + 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, + 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, + 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, + 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, + 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, + 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, + 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, + 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, + 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, + 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, + 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, + 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, + 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, + 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, + 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, + 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, + 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, + 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, + 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, + 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, + 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, + 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, + 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, + 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, + 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, + 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, + 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, + 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, + 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, + 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, + 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, + 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, + 0xbe, 
0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, + 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, + 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, + 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, + 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, + 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, + 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, + 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, + 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, + 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, + 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, + 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, + 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, + 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, + 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, + 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, + 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, + 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, + 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, + 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, + 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, + 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, + 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, + 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, + 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, + 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, + 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, + 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, + 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, + 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, + 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, + 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, + 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, + 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, + 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, + 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, + 0x39, 0x37, 0xa5, 0x1f, 0xd2, 
0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, + 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, + 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, + 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, + 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, + 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, + 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, + 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, + 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, + 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, + 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, + 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, + 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, + 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, + 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, + 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, + 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, + 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, + 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, + 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, + 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, + 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, + 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, + 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, + 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, + 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, + 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, + 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, + 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, + 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, + 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, + 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, + 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, + 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new 
file mode 100644 index 000000000..ed08fcbc5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,883 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. 
+ repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. 
Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. 
These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. 
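The FileDescriptorProto and FileOptions messages defined above are not only compiler-internal: every file generated by protoc-gen-go registers its own gzipped FileDescriptorProto, and the descriptor package vendored here lets a program read those options back at runtime. The sketch below is only an illustration and not part of this change; it assumes the proto package's file registry (proto.FileDescriptor, the retrieval counterpart of the proto.RegisterFile calls that appear in the generated files later in this diff) and uses google/protobuf/duration.proto, vendored further down, as the example file.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
	_ "github.com/golang/protobuf/ptypes/duration" // init() registers google/protobuf/duration.proto
)

func main() {
	// The registry stores the serialized FileDescriptorProto gzip-compressed.
	gz := proto.FileDescriptor("google/protobuf/duration.proto")
	if gz == nil {
		log.Fatal("file not registered")
	}
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}

	fd := new(descpb.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.GetPackage())                // google.protobuf
	fmt.Println(fd.GetOptions().GetGoPackage()) // github.com/golang/protobuf/ptypes/duration
	for _, m := range fd.GetMessageType() {
		fmt.Println(m.GetName()) // Duration
	}
}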
+ enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. 
+ optional string ruby_package = 45; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. 
+ STRING = 0; + + CORD = 1; + + STRING_PIECE = 2; + } + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. 
+ optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. 
+ enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. 
+ // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. 
+ // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..70276e8f5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. 
+ if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..78ee52334 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +package any + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). 
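As a concrete illustration of the ptypes helpers defined above (MarshalAny, UnmarshalAny, Is and DynamicAny), here is a minimal sketch; it uses the Duration message vendored in this same change as the payload, so nothing beyond this diff and the standard library is assumed.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d := &durpb.Duration{Seconds: 3, Nanos: 1}

	// MarshalAny serializes the message and records its type URL
	// ("type.googleapis.com/google.protobuf.Duration").
	a, err := ptypes.MarshalAny(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl)

	// Is reports whether the Any holds a message of the given type, so the
	// caller can pick the right concrete type to unmarshal into.
	if ptypes.Is(a, &durpb.Duration{}) {
		out := &durpb.Duration{}
		if err := ptypes.UnmarshalAny(a, out); err != nil {
			log.Fatal(err)
		}
		fmt.Println(out.Seconds, out.Nanos) // 3 1
	}

	// DynamicAny asks the registry to allocate the concrete type when the
	// caller does not know it statically.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}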
The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} + +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 
0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 000000000..493294255 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,154 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... 
+// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..c0d595da7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..26d1ca2fb --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..0d681ee21 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} + +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 000000000..975fce41a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/duration"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 000000000..8da0df01a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,132 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. 
+// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *tspb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func Timestamp(ts *tspb.Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*tspb.Timestamp, error) { + ts := &tspb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..31cd846de --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
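
Editorial aside, not part of the vendored files: a small sketch of the Timestamp round trip using the ptypes helpers shown above (TimestampProto, Timestamp, TimestampString). The instant matches the 2017-01-15T01:30:15.01Z example used in the generated comments; everything else is illustrative.

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Time -> Timestamp proto; fails only outside [0001-01-01, 10000-01-01).
        in := time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC) // 10000000 ns = 0.01s
        ts, err := ptypes.TimestampProto(in)
        if err != nil {
            panic(err)
        }
        fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

        // Timestamp proto -> time.Time; the result is always in UTC.
        out, err := ptypes.Timestamp(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.Equal(in)) // true
    }
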
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. 
In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} + +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 
0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 000000000..eafb3fa03 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,135 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
+// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..2133562b0 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. +// +// • If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on both +// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported +// fields are not compared by default; they result in panics unless suppressed +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared +// using the AllowUnexported option. 
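
Editorial aside, not part of the vendored files: the package comment above describes how cmp treats unexported fields and custom options. The sketch below shows the basic Equal/Diff calls together with AllowUnexported; the type and field names are made up for illustration.

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    // config has an unexported field, so cmp.Equal would panic on it unless the
    // field is ignored (e.g. cmpopts.IgnoreUnexported) or explicitly allowed.
    type config struct {
        Name string
        Tags []string
        id   int
    }

    func main() {
        x := config{Name: "a", Tags: []string{"one"}, id: 1}
        y := config{Name: "a", Tags: []string{"two"}, id: 1}

        opt := cmp.AllowUnexported(config{})

        fmt.Println(cmp.Equal(x, y, opt)) // false: Tags differ

        // Diff returns "" exactly when Equal reports true for the same inputs.
        fmt.Println(cmp.Diff(x, y, opt))
    }
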
+package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// • Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is greater than one, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform the current +// values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// • If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// • Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, and +// channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an Ignore option +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported +// option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using cmpopts.EquateEmpty. +// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using cmpopts.SortMaps. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using cmpopts.EquateEmpty. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +func Equal(x, y interface{}, opts ...Option) bool { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + s := newState(opts) + s.compareAny(&pathStep{t, vx, vy}) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values. 
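
Editorial aside, not part of the vendored file: rule 1 in the Equal documentation above lets a user-supplied Comparer take over the comparison for a matching type. A common use, hinted at in the package comment, is approximate float equality; the tolerance below is an arbitrary illustrative choice.

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Comparer for float64 with an absolute tolerance; it is symmetric and
        // deterministic, as cmp requires of comparison functions.
        approx := cmp.Comparer(func(a, b float64) bool {
            d := a - b
            if d < 0 {
                d = -d
            }
            return d < 1e-9
        })

        sum := 0.0
        for i := 0; i < 10; i++ {
            sum += 0.1 // accumulates floating-point rounding error
        }

        fmt.Println(cmp.Equal(sum, 1.0))         // false: compared with ==
        fmt.Println(cmp.Equal(sum, 1.0, approx)) // true: the Comparer applies
    }
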
+// It returns an empty string if and only if Equal returns true for the same +// input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added to y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + r := new(defaultReporter) + eq := Equal(x, y, Options(opts), Reporter(r)) + d := r.String() + if (d == "") != eq { + panic("inconsistent difference and equality results") + } + return d +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. + result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters map[reflect.Type]bool // Set of structs with unexported field visibility + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case visibleStructs: + if s.exporters == nil { + s.exporters = make(map[reflect.Type]bool) + } + for t := range opt { + s.exporters[t] = true + } + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. 
+ + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. 
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + x = sanitizeValue(x, f.Type().In(0)) + y = sanitizeValue(y, f.Type().In(1)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +// sanitizeValue converts nil interfaces of type T to those of type R, +// assuming that T is assignable to R. +// Otherwise, it returns the input value as is. +func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { + // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + if !flags.AtLeastGo110 { + if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { + return reflect.New(t).Elem() + } + } + return v +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var vax, vay reflect.Value // Addressable versions of vx and vy + + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + step.mayForce = s.exporters[t] + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. 
The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. + var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // TODO: Support cyclic data structures. 
+ + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go new file mode 100644 index 000000000..abc3a1c3e --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -0,0 +1,15 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 000000000..59d4ee91b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..fe98dcc67 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..597b6ae56 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. 
The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. + return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..3d2e42662 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. 
+ Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. 
The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. + // A diagonal edge is equivalent to a matching symbol between both X and Y. + + // Invariants: + // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // + // In general: + // • fwdFrontier.X < revFrontier.X + // • fwdFrontier.Y < revFrontier.Y + // Unless, it is time for the algorithm to terminate. + fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} + revPath := path{-1, point{nx, ny}, make(EditScript, 0)} + fwdFrontier := fwdPath.point // Forward search frontier + revFrontier := revPath.point // Reverse search frontier + + // Search budget bounds the cost of searching for better paths. + // The longest sequence of non-matching symbols that can be tolerated is + // approximately the square-root of the search budget. + searchBudget := 4 * (nx + ny) // O(n) + + // The algorithm below is a greedy, meet-in-the-middle algorithm for + // computing sub-optimal edit-scripts between two lists. + // + // The algorithm is approximately as follows: + // • Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). The goal of + // the search is connect with the search from the opposite corner. + // • As we search, we build a path in a greedy manner, where the first + // match seen is added to the path (this is sub-optimal, but provides a + // decent result in practice). When matches are found, we try the next pair + // of symbols in the lists and follow all matches as far as possible. + // • When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, we advance the + // frontier towards the opposite corner. + // • This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. 
+ // + // This algorithm is correct even if searching only in the forward direction + // or in the reverse direction. We do both because it is commonly observed + // that two lists commonly differ because elements were added to the front + // or end of the other list. + // + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + for { + // Forward search from the beginning. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{fwdFrontier.X + z, fwdFrontier.Y - z} + switch { + case p.X >= revPath.X || p.Y < fwdPath.Y: + stop1 = true // Hit top-right corner + case p.Y >= revPath.Y || p.X < fwdPath.X: + stop2 = true // Hit bottom-left corner + case f(p.X, p.Y).Equal(): + // Match found, so connect the path to this point. + fwdPath.connect(p, f) + fwdPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(fwdPath.X, fwdPath.Y).Equal() { + break + } + fwdPath.append(Identity) + } + fwdFrontier = fwdPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards reverse point. + if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. 
+ for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..a9e7fc0b5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 000000000..01aed0a15 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 000000000..c0b667f58 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..ace1dbe86 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 000000000..0a01c4796 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. 
+ return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 000000000..da134ae2a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..24fbae6e3 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. 
+ fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 000000000..06a8ffd03 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,48 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "math" + "reflect" +) + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. 
+func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..793448160 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. + apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. 
+type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. +// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. 
+func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. 
+// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
+func (r Result) Equal() bool { + return r.flags&(reportEqual|reportByIgnore) != 0 +} + +// ByIgnore reports whether the node is equal because it was ignored. +// This never reports true if Equal reports false. +func (r Result) ByIgnore() bool { + return r.flags&reportByIgnore != 0 +} + +// ByMethod reports whether the Equal method determined equality. +func (r Result) ByMethod() bool { + return r.flags&reportByMethod != 0 +} + +// ByFunc reports whether a Comparer function determined equality. +func (r Result) ByFunc() bool { + return r.flags&reportByFunc != 0 +} + +type resultFlags uint + +const ( + _ resultFlags = (1 << iota) / 2 + + reportEqual + reportUnequal + reportByIgnore + reportByMethod + reportByFunc +) + +// Reporter is an Option that can be passed to Equal. When Equal traverses +// the value trees, it calls PushStep as it descends into each node in the +// tree and PopStep as it ascend out of the node. The leaves of the tree are +// either compared (determined to be equal or not equal) or ignored and reported +// as such by calling the Report method. +func Reporter(r interface { + // PushStep is called when a tree-traversal operation is performed. + // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..96fffd291 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,308 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..6ddf29993 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..17a05eede --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+ TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. + formatValueOptions +} + +func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { + opts.DiffMode = d + return opts +} +func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { + opts.TypeMode = t + return opts +} + +// FormatDiff converts a valueNode tree into a textNode tree, where the later +// is a textual representation of the differences detected in the former. +func (opts formatOptions) FormatDiff(v *valueNode) textNode { + // Check whether we have specialized formatting for this node. + // This is not necessary, but helpful for producing more readable outputs. + if opts.CanFormatDiffSlice(v) { + return opts.FormatDiffSlice(v) + } + + // For leaf nodes, format the value based on the reflect.Values alone. + if v.MaxDepth == 0 { + switch opts.DiffMode { + case diffUnknown, diffIdentical: + // Format Equal. + if v.NumDiff == 0 { + outx := opts.FormatValue(v.ValueX, visitedPointers{}) + outy := opts.FormatValue(v.ValueY, visitedPointers{}) + if v.NumIgnored > 0 && v.NumSame == 0 { + return textEllipsis + } else if outx.Len() < outy.Len() { + return outx + } else { + return outy + } + } + + // Format unequal. + assert(opts.DiffMode == diffUnknown) + var list textList + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + if outx != nil { + list = append(list, textRecord{Diff: '-', Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: '+', Value: outy}) + } + return opts.WithTypeMode(emitType).FormatType(v.Type, list) + case diffRemoved: + return opts.FormatValue(v.ValueX, visitedPointers{}) + case diffInserted: + return opts.FormatValue(v.ValueY, visitedPointers{}) + default: + panic("invalid diff mode") + } + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Ptr: + return textWrap{"&", opts.FormatDiff(v.Value), ""} + case reflect.Interface: + return opts.WithTypeMode(emitType).FormatDiff(v.Value) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = formatMapKey + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + // Elide struct fields that are zero value. 
+ if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + case diffRemoved: + isZero = value.IsZero(r.Value.ValueX) + case diffInserted: + isZero = value.IsZero(r.Value.ValueY) + } + if isZero { + continue + } + } + // Elide ignored nodes. + if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return textWrap{"{", list, "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var list textList + groups := coalesceAdjacentRecords(name, recs) + for i, ds := range groups { + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. + for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + } + default: + out := opts.FormatDiff(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + recs = recs[ds.NumDiff():] + } + assert(len(recs) == 0) + return textWrap{"{", list, "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. 
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..2761b6289 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,278 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // ShallowPointers controls whether to avoid descending into pointers. + // Useful when printing map keys, where pointer comparison is performed + // on the pointer address rather than the pointed-at value. + ShallowPointers bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool +} + +// FormatType prints the type as if it were wrapping s. +// This may return s as-is depending on the current type and TypeMode mode. +func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { + // Check whether to emit the type or not. + switch opts.TypeMode { + case autoType: + switch t.Kind() { + case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: + if s.Equal(textNil) { + return s + } + default: + return s + } + case elideType: + return s + } + + // Determine the type label, applying special handling for unnamed types. + typeName := t.String() + if t.Name() == "" { + // According to Go grammar, certain type literals contain symbols that + // do not strongly bind to the next lexicographical token (e.g., *T). + switch t.Kind() { + case reflect.Chan, reflect.Func, reflect.Ptr: + typeName = "(" + typeName + ")" + } + typeName = strings.Replace(typeName, "struct {", "struct{", -1) + typeName = strings.Replace(typeName, "interface {", "interface{", -1) + } + + // Avoid wrap the value in parenthesis if unnecessary. + if s, ok := s.(textWrap); ok { + hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") + hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + if hasParens || hasBraces { + return textWrap{typeName, s, ""} + } + } + return textWrap{typeName + "(", s, ")"} +} + +// FormatValue prints the reflect.Value, taking extra care to avoid descending +// into pointers already in m. As pointers are visited, m is also updated. 
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type. + skipType = true // Print the concrete type instead + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +// formatMapKey formats v as if it were a map key. +// The result is guaranteed to be a single line. +func formatMapKey(v reflect.Value) string { + var opts formatOptions + opts.TypeMode = elideType + opts.ShallowPointers = true + s := opts.FormatValue(v, visitedPointers{}).String() + return strings.TrimSpace(s) +} + +// formatString prints s as a double-quoted or backtick-quoted string. +func formatString(s string) string { + // Use quoted string if it the same length as a raw string literal. + // Otherwise, attempt to use the raw string form. + qs := strconv.Quote(s) + if len(qs) == 1+len(s)+1 { + return qs + } + + // Disallow newlines to ensure output is a single line. + // Only allow printable runes for readability purposes. + rawInvalid := func(r rune) bool { + return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') + } + if strings.IndexFunc(s, rawInvalid) < 0 { + return "`" + s + "`" + } + return qs +} + +// formatHex prints u as a hexadecimal integer in Go notation. +func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} + +// formatPointer prints the address of the pointer. +func formatPointer(v reflect.Value) string { + p := v.Pointer() + if flags.Deterministic { + p = 0xdeadf00f // Only used for stable testing purposes + } + return fmt.Sprintf("⟪0x%x⟫", p) +} + +type visitedPointers map[value.Pointer]struct{} + +// Visit inserts pointer v into the visited map and reports whether it had +// already been visited before. +func (m visitedPointers) Visit(v reflect.Value) bool { + p := value.PointerOf(v) + _, visited := m[p] + m[p] = struct{}{} + return visited +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..eafcf2e4c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,333 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: + // TODO: Handle the case where someone uses bytes.Equal on a large slice. 
+ return false // Some custom option was used to determined equality + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + } + + switch t := v.Type; t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. + const minLength = 64 + return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + + // Auto-detect the type of the data. + var isLinedText, isText, isBinary bool + var sx, sy string + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isText = true // Initial estimate, verify later + case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isBinary = true // Initial estimate, verify later + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isText || isBinary { + var numLines, lastLineIdx, maxLineLen int + isBinary = false + for i, r := range sx + sy { + if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { + isBinary = true + break + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isText = !isBinary + isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isLinedText: + ssx := strings.Split(sx, "\n") + ssy := strings.Split(sy, "\n") + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "" + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. 
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = prev.Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..8b8fcab7b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
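The go-cmp report files vendored in this change (report.go, report_compare.go, report_reflect.go, report_slices.go, report_text.go, report_value.go) are the internal machinery that turns the comparison tree built by cmp.Equal into the textual diff returned by cmp.Diff. As a rough illustration of the public API these reporters serve (not part of the vendored sources; the config struct and its values are made up), a caller might do:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// config is a hypothetical struct used only to show the kind of report
// the vendored formatters produce for unequal values.
type config struct {
	Name    string
	Retries int
	Tags    []string
}

func main() {
	x := config{Name: "svc", Retries: 3, Tags: []string{"a", "b"}}
	y := config{Name: "svc", Retries: 5, Tags: []string{"a", "c"}}

	// When the values differ, cmp.Diff builds a valueNode tree via the
	// defaultReporter and formats it with FormatDiff; equal fields are
	// elided or summarized, while differing ones get -/+ markers.
	if diff := cmp.Diff(x, y); diff != "" {
		fmt.Printf("mismatch (-x +y):\n%s", diff)
	}
}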
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) 
+ } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. 
+// +// Example: +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). + switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..83031a7f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 000000000..ae121a1e4 --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 000000000..37080b19b --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,320 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. +package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any time.Time that returns true +// for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. 
Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()) +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. 
+func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, sv) + continue + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + if !reflect.Indirect(sv).IsValid() { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del byte + if opts.Contains("comma") { + del = ',' + } else if opts.Contains("space") { + del = ' ' + } else if opts.Contains("semicolon") { + del = ';' + } else if opts.Contains("brackets") { + name = name + "[]" + } + + if del != 0 { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteByte(del) + } + s.WriteString(valueString(sv.Index(i), opts)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts)) + } + } + continue + } + + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts)) + continue + } + + if sv.Kind() == reflect.Struct { + reflectValue(values, sv, name) + continue + } + + values.Add(name, valueString(sv, opts)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + if v.Type() == timeType { + return v.Interface().(time.Time).IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. 
+type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
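The Values function documented in encode.go above is the main entry point of the vendored go-querystring package: it walks a tagged struct and produces url.Values. A minimal, hedged sketch of how a caller might exercise the documented tag options ("omitempty", "comma", "unix", "int") follows; the searchOptions type and its field values are illustrative only, not part of the vendored code:

package main

import (
	"fmt"
	"time"

	"github.com/google/go-querystring/query"
)

// searchOptions is a hypothetical options struct whose tags exercise the
// behaviors documented in encode.go above.
type searchOptions struct {
	Query   string    `url:"q"`
	Page    int       `url:"page,omitempty"` // dropped while zero
	Tags    []string  `url:"tag,comma"`      // one comma-delimited value
	Since   time.Time `url:"since,unix"`     // Unix seconds instead of RFC3339
	Verbose bool      `url:"verbose,int"`    // "1"/"0" instead of "true"/"false"
}

func main() {
	opts := searchOptions{
		Query:   "terraform",
		Tags:    []string{"azure", "databricks"},
		Since:   time.Unix(1500000000, 0),
		Verbose: true,
	}
	v, err := query.Values(opts)
	if err != nil {
		panic(err)
	}
	// Encode sorts keys, so the result is roughly:
	// q=terraform&since=1500000000&tag=azure%2Cdatabricks&verbose=1
	fmt.Println(v.Encode())
}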
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..9d92c11f1 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 000000000..fc84cd79d --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b17461631 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..7f9e0c6c0 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
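Because UUID implements encoding.TextMarshaler and TextUnmarshaler (marshal.go above), encoding/json serializes it as its canonical 36-character string without any extra glue. A minimal round-trip sketch, with a hypothetical `record` struct used only for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID   uuid.UUID `json:"id"`
	Name string    `json:"name"`
}

func main() {
	in := record{ID: uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"), Name: "demo"}

	// json.Marshal uses MarshalText, so the UUID appears as a quoted string.
	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // {"id":"6ba7b810-9dad-11d1-80b4-00c04fd430c8","name":"demo"}

	// json.Unmarshal uses UnmarshalText to parse it back.
	var out record
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID == in.ID) // true: UUIDs are 16-byte arrays and compare directly
}
```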
+func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..f326b54db --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. 
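The Scan and Value methods in sql.go are normally invoked by database/sql, but they can be exercised directly, which makes the accepted inputs easy to see. A minimal sketch, outside any real driver, using an arbitrary example UUID:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Scan accepts the canonical string form...
	var u uuid.UUID
	if err := u.Scan("6ba7b812-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}

	// ...or a raw 16-byte slice, as some drivers return.
	var v uuid.UUID
	if err := v.Scan(u[:]); err != nil {
		panic(err)
	}

	// Value renders the string form for writing back to the database.
	val, _ := u.Value()
	fmt.Println(u == v, val) // true 6ba7b812-9dad-11d1-80b4-00c04fd430c8
}
```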
Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. 
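A sketch of pulling the embedded timestamp back out of a Version 1 UUID using the Time, ClockSequence, and NodeID accessors described above (the accessors themselves appear just below in the diff); the printed time should be close to time.Now at the moment of generation:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	// Version 1 UUIDs embed a 60-bit timestamp, a clock sequence, and a node ID.
	u, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}

	// Time() yields 100ns intervals since 15 Oct 1582; UnixTime converts that
	// to the Unix epoch so it can be handed to the time package.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC()) // roughly the generation time

	fmt.Println(u.ClockSequence(), u.NodeID()) // clock sequence and 6-byte node ID
}
```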
+func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..524404cc5 --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. 
+ Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
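To make the four encodings accepted by Parse concrete, here is a small illustrative sketch; all four strings decode to the same UUID, and anything else is rejected with a descriptive error:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",           // standard
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",  // URN form
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",         // Microsoft braces
		"6ba7b8109dad11d180b400c04fd430c8",                // raw hex
	}
	for _, s := range forms {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // same UUID each time, <nil>
	}

	// Invalid input reports why it was rejected.
	if _, err := uuid.Parse("not-a-uuid"); err != nil {
		fmt.Println(err) // invalid UUID length: 10
	}
}
```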
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..199a1ac65 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. 
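A brief sketch tying together the formatting and inspection helpers above with New (the Version 4 constructor defined in version4.go, shown further below). Output values differ on every run since the UUID is random:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	u := uuid.New() // random Version 4 UUID; panics only if crypto/rand fails

	fmt.Println(u.String())  // xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
	fmt.Println(u.URN())     // urn:uuid:xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx
	fmt.Println(u.Version()) // VERSION_4
	fmt.Println(u.Variant()) // RFC4122
}
```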
+func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..84af91c9f --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE new file mode 100644 index 000000000..6d16b6578 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 000000000..b1d53dd19 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,161 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "math/rand" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retriedand how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. 
+// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 000000000..3fd1b0b84 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax-go version being used. +const Version = "2.0.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod new file mode 100644 index 000000000..9cdfaf447 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/googleapis/gax-go/v2 + +require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum new file mode 100644 index 000000000..7fa23ecf9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/go.sum @@ -0,0 +1,25 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 000000000..139371a0b --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
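For reference, a tiny illustrative sketch of XGoogHeader (implemented just below); the "gl-go" key and version literal are arbitrary examples of the kind of metadata client libraries report:

```go
package main

import (
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
)

func main() {
	// Pairs are joined as "key/value" tokens separated by single spaces,
	// the format expected in the x-goog-api-client header.
	h := gax.XGoogHeader("gl-go", "1.12", "gax", gax.Version)
	fmt.Println(h) // gl-go/1.12 gax/2.0.4
}
```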
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 000000000..fe31dd004 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,99 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strings" + "time" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, +// performing retries as specified by opts, if any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + for { + err := call(ctx, settings) + if err == nil { + return nil + } + if settings.Retry == nil { + return err + } + // Never retry permanent certificate errors. (e.x. if ca-certificates + // are not installed). 
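A sketch of how Invoke, WithRetry, OnCodes, and Backoff from this package fit together. The fake APICall, the attempt counter, and the specific backoff numbers are purely illustrative; a real call would wrap an actual gRPC stub method:

```go
package main

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Retry only on Unavailable, with exponential backoff between attempts.
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        5 * time.Second,
			Multiplier: 2,
		})
	})

	// A stand-in APICall that fails twice before succeeding.
	attempts := 0
	call := func(ctx context.Context, _ gax.CallSettings) error {
		attempts++
		if attempts < 3 {
			return status.Error(codes.Unavailable, "transient")
		}
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Invoke retries the call according to the CallOptions until it
	// succeeds, exhausts the retry policy, or the context is done.
	if err := gax.Invoke(ctx, call, retry); err != nil {
		panic(err)
	}
}
```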
We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere. + if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 000000000..444df08f8 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. 
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "does not exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 000000000..a733bef18
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+	WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+	return &wrappedError{
+		Outer: outer,
+		Inner: inner,
+	}
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. 
+type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 000000000..c9b84022c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 000000000..036e5313f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. 
With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 000000000..8d306bf51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 000000000..05841092a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, esepcially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. 
This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors. If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 000000000..310f07569 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 000000000..3c845dc0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r != nil { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + if next != nil { + next.ServeHTTP(w, r) + } + } + + return + }) +} diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml new file mode 100644 index 000000000..4fe9176aa --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/.travis.yml @@ -0,0 +1,24 @@ +sudo: false + +addons: + apt: + sources: + - sourceline: 'ppa:git-core/ppa' + packages: + - git + +language: go + +os: + - linux + - osx + +go: + - "1.11.x" + +before_script: + - go build ./cmd/go-getter + +branches: + only: + - master diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. 
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md new file mode 100644 index 000000000..3de23c709 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -0,0 +1,358 @@ +# go-getter + +[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis] +[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-getter +[godocs]: http://godoc.org/github.com/hashicorp/go-getter +[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master + +go-getter is a library for Go (golang) for downloading files or directories +from various sources using a URL as the primary form of input. + +The power of this library is being flexible in being able to download +from a number of different sources (file paths, Git, HTTP, Mercurial, etc.) +using a single string as input. This removes the burden of knowing how to +download from a variety of sources from the implementer. + +The concept of a _detector_ automatically turns invalid URLs into proper +URLs. For example: "github.com/hashicorp/go-getter" would turn into a +Git URL. Or "./foo" would turn into a file URL. These are extensible. + +This library is used by [Terraform](https://terraform.io) for +downloading modules and [Nomad](https://nomadproject.io) for downloading +binaries. + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-getter). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-getter +``` + +go-getter also has a command you can use to test URL strings: + +``` +$ go install github.com/hashicorp/go-getter/cmd/go-getter +... + +$ go-getter github.com/foo/bar ./foo +... +``` + +The command is useful for verifying URL structures. + +## URL Format + +go-getter uses a single string URL as input to download from a variety of +protocols. 
go-getter has various "tricks" with this URL to do certain things. +This section documents the URL format. + +### Supported Protocols and Detectors + +**Protocols** are used to download files/directories using a specific +mechanism. Example protocols are Git and HTTP. + +**Detectors** are used to transform a valid or invalid URL into another +URL if it matches a certain pattern. Example: "github.com/user/repo" is +automatically transformed into a fully valid Git URL. This allows go-getter +to be very user friendly. + +go-getter out of the box supports the following protocols. Additional protocols +can be augmented at runtime by implementing the `Getter` interface. + + * Local files + * Git + * Mercurial + * HTTP + * Amazon S3 + * Google GCP + +In addition to the above protocols, go-getter has what are called "detectors." +These take a URL and attempt to automatically choose the best protocol for +it, which might involve even changing the protocol. The following detection +is built-in by default: + + * File paths such as "./foo" are automatically changed to absolute + file URLs. + * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically + changed to Git protocol over HTTP. + * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically + changed to a Git or mercurial protocol using the BitBucket API. + +### Forced Protocol + +In some cases, the protocol to use is ambiguous depending on the source +URL. For example, "http://github.com/mitchellh/vagrant.git" could reference +an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this +URL. + +Forced protocol can be done by prefixing the URL with the protocol followed +by double colons. For example: `git::http://github.com/mitchellh/vagrant.git` +would download the given HTTP URL using the Git protocol. + +Forced protocols will also override any detectors. + +In the absence of a forced protocol, detectors may be run on the URL, transforming +the protocol anyways. The above example would've used the Git protocol either +way since the Git detector would've detected it was a GitHub URL. + +### Protocol-Specific Options + +Each protocol can support protocol-specific options to configure that +protocol. For example, the `git` protocol supports specifying a `ref` +query parameter that tells it what ref to checkout for that Git +repository. + +The options are specified as query parameters on the URL (or URL-like string) +given to go-getter. Using the Git example above, the URL below is a valid +input to go-getter: + + github.com/hashicorp/go-getter?ref=abcd1234 + +The protocol-specific options are documented below the URL format +section. But because they are part of the URL, we point it out here so +you know they exist. + +### Subdirectories + +If you want to download only a specific subdirectory from a downloaded +directory, you can specify a subdirectory after a double-slash `//`. +go-getter will first download the URL specified _before_ the double-slash +(as if you didn't specify a double-slash), but will then copy the +path after the double slash into the target directory. + +For example, if you're downloading this GitHub repository, but you only +want to download the `testdata` directory, you can do the following: + +``` +https://github.com/hashicorp/go-getter.git//testdata +``` + +If you downloaded this to the `/tmp` directory, then the file +`/tmp/archive.gz` would exist. 
Notice that this file is in the `testdata`
+directory in this repository, but because we specified a subdirectory,
+go-getter automatically copied only that directory's contents.
+
+Subdirectory paths may also use filesystem glob patterns.
+The path must match _exactly one_ entry or go-getter will return an error.
+This is useful if you're not sure of the exact directory name but it follows
+a predictable naming structure.
+
+For example, the following URL would also work:
+
+```
+https://github.com/hashicorp/go-getter.git//test-*
+```
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works for downloading files,
+not directories, but checksumming will work for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL. go-getter
+will parse out this query parameter automatically and use it to verify the
+checksum. The parameter value can be in the format of `type:value` or just
+`value`, where type is "md5", "sha1", "sha256", "sha512" or "file". The
+"value" should be the actual checksum value or download URL for "file". When
+the `type` part is omitted, the type will be guessed based on the length of
+the checksum string. Examples:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+```
+./foo.txt?checksum=b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+```
+./foo.txt?checksum=file:./foo.txt.sha256sum
+```
+
+When checksumming from a file - ex: with `checksum=file:url` - go-getter will
+get the file linked in the URL after `file:` using the same configuration. For
+example, in `file:http://releases.ubuntu.com/cosmic/MD5SUMS` go-getter will
+download a checksum file from the aforementioned URL using the HTTP protocol.
+All protocols supported by go-getter can be used. The checksum file will be
+downloaded to a temporary file and then parsed. The destination of the
+temporary file can be changed by setting system-specific environment
+variables: `TMPDIR` for Unix; `TMP`, `TEMP` or `USERPROFILE` on Windows. Read
+the godoc of [os.TempDir](https://golang.org/pkg/os/#TempDir) for more
+information on the temporary directory selection. Checksum files are expected
+to be in BSD or GNU style. Once go-getter is done with the checksum file, it
+is deleted.
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+If the destination file exists and the checksums match, the download
+will be skipped.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+ * `tar.gz` and `tgz`
+ * `tar.bz2` and `tbz2`
+ * `tar.xz` and `txz`
+ * `zip`
+ * `gz`
+ * `bz2`
+ * `xz`
+
+An example URL is shown below:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type: + +``` +./some/other/path?archive=zip +``` + +And finally, you can disable archiving completely: + +``` +./some/path?archive=false +``` + +You can combine unarchiving with the other features of go-getter such +as checksumming. The special `archive` query parameter will be removed +from the URL before going to the final protocol downloader. + +## Protocol-Specific Options + +This section documents the protocol-specific options that can be specified for +go-getter. These options should be appended to the input as normal query +parameters ([HTTP headers](#headers) are an exception to this, however). +Depending on the usage of go-getter, applications may provide alternate ways of +inputting options. For example, [Nomad](https://www.nomadproject.io) provides a +nice options block for specifying options rather than in the URL. + +## General (All Protocols) + +The options below are available to all protocols: + + * `archive` - The archive format to use to unarchive this file, or "" (empty + string) to disable unarchiving. For more details, see the complete section + on archive support above. + + * `checksum` - Checksum to verify the downloaded file or archive. See + the entire section on checksumming above for format and more details. + + * `filename` - When in file download mode, allows specifying the name of the + downloaded file on disk. Has no effect in directory mode. + +### Local Files (`file`) + +None + +### Git (`git`) + + * `ref` - The Git ref to checkout. This is a ref, so it can point to + a commit SHA, a branch name, etc. If it is a named ref such as a branch + name, go-getter will update it to the latest on each get. + + * `sshkey` - An SSH private key to use during clones. The provided key must + be a base64-encoded string. For example, to generate a suitable `sshkey` + from a private key file on disk, you would run `base64 -w0 `. + + **Note**: Git 2.3+ is required to use this feature. + + * `depth` - The Git clone depth. The provided number specifies the last `n` + revisions to clone from the repository. + + +The `git` getter accepts both URL-style SSH addresses like +`git::ssh://git@example.com/foo/bar`, and "scp-style" addresses like +`git::git@example.com/foo/bar`. In the latter case, omitting the `git::` +force prefix is allowed if the username prefix is exactly `git@`. + +The "scp-style" addresses _cannot_ be used in conjunction with the `ssh://` +scheme prefix, because in that case the colon is used to mark an optional +port number to connect on, rather than to delimit the path from the host. + +### Mercurial (`hg`) + + * `rev` - The Mercurial revision to checkout. + +### HTTP (`http`) + +#### Basic Authentication + +To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the +hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special +characters, including the username and password, must be URL encoded. + +#### Headers + +Optional request headers can be added by supplying them in a custom +[`HttpGetter`](https://godoc.org/github.com/hashicorp/go-getter#HttpGetter) +(_not_ as query parameters like most other options). These headers will be sent +out on every request the getter in question makes. + +### S3 (`s3`) + +S3 takes various access configurations in the URL. Note that it will also +read these from standard AWS environment variables if they're set. S3 compliant servers like Minio +are also supported. 
If the query parameters are present, these take priority. + + * `aws_access_key_id` - AWS access key. + * `aws_access_key_secret` - AWS access key secret. + * `aws_access_token` - AWS access token if this is being used. + +#### Using IAM Instance Profiles with S3 + +If you use go-getter and want to use an EC2 IAM Instance Profile to avoid +using credentials, then just omit these and the profile, if available will +be used automatically. + +### Using S3 with Minio + If you use go-gitter for Minio support, you must consider the following: + + * `aws_access_key_id` (required) - Minio access key. + * `aws_access_key_secret` (required) - Minio access key secret. + * `region` (optional - defaults to us-east-1) - Region identifier to use. + * `version` (optional - defaults to Minio default) - Configuration file format. + +#### S3 Bucket Examples + +S3 has several addressing schemes used to reference your bucket. These are +listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + +Some examples for these addressing schemes: +- s3::https://s3.amazonaws.com/bucket/foo +- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo +- bucket.s3.amazonaws.com/foo +- bucket.s3-eu-west-1.amazonaws.com/foo/bar +- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY®ion=us-east-2" + +### GCS (`gcs`) + +#### GCS Authentication + +In order to access to GCS, authentication credentials should be provided. More information can be found [here](https://cloud.google.com/docs/authentication/getting-started) + +#### GCS Bucket Examples + +- gcs::https://www.googleapis.com/storage/v1/bucket +- gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip +- www.googleapis.com/storage/v1/bucket/foo diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml new file mode 100644 index 000000000..1e8718e17 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml @@ -0,0 +1,16 @@ +version: "build-{branch}-{build}" +image: Visual Studio 2017 +clone_folder: c:\gopath\github.com\hashicorp\go-getter +environment: + GOPATH: c:\gopath +install: +- cmd: >- + echo %Path% + + go version + + go env + + go get -d -v -t ./... +build_script: +- cmd: go test ./... diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go new file mode 100644 index 000000000..eeccfea9d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/checksum.go @@ -0,0 +1,314 @@ +package getter + +import ( + "bufio" + "bytes" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + "fmt" + "hash" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// FileChecksum helps verifying the checksum for a file. +type FileChecksum struct { + Type string + Hash hash.Hash + Value []byte + Filename string +} + +// A ChecksumError is returned when a checksum differs +type ChecksumError struct { + Hash hash.Hash + Actual []byte + Expected []byte + File string +} + +func (cerr *ChecksumError) Error() string { + if cerr == nil { + return "" + } + return fmt.Sprintf( + "Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T", + cerr.File, + hex.EncodeToString(cerr.Expected), + hex.EncodeToString(cerr.Actual), + cerr.Hash, // ex: *sha256.digest + ) +} + +// checksum is a simple method to compute the checksum of a source file +// and compare it to the given expected value. 
+func (c *FileChecksum) checksum(source string) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("Failed to open file for checksum: %s", err) + } + defer f.Close() + + c.Hash.Reset() + if _, err := io.Copy(c.Hash, f); err != nil { + return fmt.Errorf("Failed to hash: %s", err) + } + + if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) { + return &ChecksumError{ + Hash: c.Hash, + Actual: actual, + Expected: c.Value, + File: source, + } + } + + return nil +} + +// extractChecksum will return a FileChecksum based on the 'checksum' +// parameter of u. +// ex: +// http://hashicorp.com/terraform?checksum= +// http://hashicorp.com/terraform?checksum=: +// http://hashicorp.com/terraform?checksum=file: +// when checksumming from a file, extractChecksum will go get checksum_url +// in a temporary directory, parse the content of the file then delete it. +// Content of files are expected to be BSD style or GNU style. +// +// BSD-style checksum: +// MD5 (file1) = +// MD5 (file2) = +// +// GNU-style: +// file1 +// *file2 +// +// see parseChecksumLine for more detail on checksum file parsing +func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) { + q := u.Query() + v := q.Get("checksum") + + if v == "" { + return nil, nil + } + + vs := strings.SplitN(v, ":", 2) + switch len(vs) { + case 2: + break // good + default: + // here, we try to guess the checksum from it's length + // if the type was not passed + return newChecksumFromValue(v, filepath.Base(u.EscapedPath())) + } + + checksumType, checksumValue := vs[0], vs[1] + + switch checksumType { + case "file": + return c.ChecksumFromFile(checksumValue, u) + default: + return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) + } +} + +func newChecksum(checksumValue, filename string) (*FileChecksum, error) { + c := &FileChecksum{ + Filename: filename, + } + var err error + c.Value, err = hex.DecodeString(checksumValue) + if err != nil { + return nil, fmt.Errorf("invalid checksum: %s", err) + } + return c, nil +} + +func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + c.Type = strings.ToLower(checksumType) + switch c.Type { + case "md5": + c.Hash = md5.New() + case "sha1": + c.Hash = sha1.New() + case "sha256": + c.Hash = sha256.New() + case "sha512": + c.Hash = sha512.New() + default: + return nil, fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + return c, nil +} + +func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + switch len(c.Value) { + case md5.Size: + c.Hash = md5.New() + c.Type = "md5" + case sha1.Size: + c.Hash = sha1.New() + c.Type = "sha1" + case sha256.Size: + c.Hash = sha256.New() + c.Type = "sha256" + case sha512.Size: + c.Hash = sha512.New() + c.Type = "sha512" + default: + return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue) + } + + return c, nil +} + +// ChecksumFromFile will return all the FileChecksums found in file +// +// ChecksumFromFile will try to guess the hashing algorithm based on content +// of checksum file +// +// ChecksumFromFile will only return checksums for files that match file +// behind src +func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) { + checksumFileURL, err := urlhelper.Parse(checksumFile) + if err != 
nil { + return nil, err + } + + tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path)) + if err != nil { + return nil, err + } + defer os.Remove(tempfile) + + c2 := &Client{ + Ctx: c.Ctx, + Getters: c.Getters, + Decompressors: c.Decompressors, + Detectors: c.Detectors, + Pwd: c.Pwd, + Dir: false, + Src: checksumFile, + Dst: tempfile, + ProgressListener: c.ProgressListener, + } + if err = c2.Get(); err != nil { + return nil, fmt.Errorf( + "Error downloading checksum file: %s", err) + } + + filename := filepath.Base(src.Path) + absPath, err := filepath.Abs(src.Path) + if err != nil { + return nil, err + } + checksumFileDir := filepath.Dir(checksumFileURL.Path) + relpath, err := filepath.Rel(checksumFileDir, absPath) + switch { + case err == nil || + err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir: + // ex: on windows C:\gopath\...\content.txt cannot be relative to \ + // which is okay, may be another expected path will work. + break + default: + return nil, err + } + + // possible file identifiers: + options := []string{ + filename, // ubuntu-14.04.1-server-amd64.iso + "*" + filename, // *ubuntu-14.04.1-server-amd64.iso Standard checksum + "?" + filename, // ?ubuntu-14.04.1-server-amd64.iso shasum -p + relpath, // dir/ubuntu-14.04.1-server-amd64.iso + "./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso + absPath, // fullpath; set if local + } + + f, err := os.Open(tempfile) + if err != nil { + return nil, fmt.Errorf( + "Error opening downloaded file: %s", err) + } + defer f.Close() + rd := bufio.NewReader(f) + for { + line, err := rd.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, fmt.Errorf( + "Error reading checksum file: %s", err) + } + break + } + checksum, err := parseChecksumLine(line) + if err != nil || checksum == nil { + continue + } + if checksum.Filename == "" { + // filename not sure, let's try + return checksum, nil + } + // make sure the checksum is for the right file + for _, option := range options { + if option != "" && checksum.Filename == option { + // any checksum will work so we return the first one + return checksum, nil + } + } + } + return nil, fmt.Errorf("no checksum found in: %s", checksumFile) +} + +// parseChecksumLine takes a line from a checksum file and returns +// checksumType, checksumValue and filename parseChecksumLine guesses the style +// of the checksum BSD vs GNU by splitting the line and by counting the parts. +// of a line. +// for BSD type sums parseChecksumLine guesses the hashing algorithm +// by checking the length of the checksum. 
+func parseChecksumLine(line string) (*FileChecksum, error) { + parts := strings.Fields(line) + + switch len(parts) { + case 4: + // BSD-style checksum: + // MD5 (file1) = + // MD5 (file2) = + if len(parts[1]) <= 2 || + parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' { + return nil, fmt.Errorf( + "Unexpected BSD-style-checksum filename format: %s", line) + } + filename := parts[1][1 : len(parts[1])-1] + return newChecksumFromType(parts[0], parts[3], filename) + case 2: + // GNU-style: + // file1 + // *file2 + return newChecksumFromValue(parts[0], parts[1]) + case 0: + return nil, nil // empty line + default: + return newChecksumFromValue(parts[0], "") + } +} diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go new file mode 100644 index 000000000..007a78ba7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -0,0 +1,298 @@ +package getter + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" +) + +// Client is a client for downloading things. +// +// Top-level functions such as Get are shortcuts for interacting with a client. +// Using a client directly allows more fine-grained control over how downloading +// is done, as well as customizing the protocols supported. +type Client struct { + // Ctx for cancellation + Ctx context.Context + + // Src is the source URL to get. + // + // Dst is the path to save the downloaded thing as. If Dir is set to + // true, then this should be a directory. If the directory doesn't exist, + // it will be created for you. + // + // Pwd is the working directory for detection. If this isn't set, some + // detection may fail. Client will not default pwd to the current + // working directory for security reasons. + Src string + Dst string + Pwd string + + // Mode is the method of download the client will use. See ClientMode + // for documentation. + Mode ClientMode + + // Detectors is the list of detectors that are tried on the source. + // If this is nil, then the default Detectors will be used. + Detectors []Detector + + // Decompressors is the map of decompressors supported by this client. + // If this is nil, then the default value is the Decompressors global. + Decompressors map[string]Decompressor + + // Getters is the map of protocols supported by this client. If this + // is nil, then the default Getters variable will be used. + Getters map[string]Getter + + // Dir, if true, tells the Client it is downloading a directory (versus + // a single file). This distinction is necessary since filenames and + // directory names follow the same format so disambiguating is impossible + // without knowing ahead of time. + // + // WARNING: deprecated. If Mode is set, that will take precedence. + Dir bool + + // ProgressListener allows to track file downloads. + // By default a no op progress listener is used. + ProgressListener ProgressTracker + + Options []ClientOption +} + +// Get downloads the configured source to the destination. 
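+//
+// Illustrative sketch (the source and destination below are placeholders):
+//
+//	client := &Client{
+//		Src:  "git::https://github.com/hashicorp/go-getter.git//testdata",
+//		Dst:  "/tmp/getter-dst",
+//		Mode: ClientModeDir,
+//	}
+//	err := client.Get()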
+func (c *Client) Get() error { + if err := c.Configure(c.Options...); err != nil { + return err + } + + // Store this locally since there are cases we swap this + mode := c.Mode + if mode == ClientModeInvalid { + if c.Dir { + mode = ClientModeDir + } else { + mode = ClientModeFile + } + } + + src, err := Detect(c.Src, c.Pwd, c.Detectors) + if err != nil { + return err + } + + // Determine if we have a forced protocol, i.e. "git::http://..." + force, src := getForcedGetter(src) + + // If there is a subdir component, then we download the root separately + // and then copy over the proper subdir. + var realDst string + dst := c.Dst + src, subDir := SourceDirSubdir(src) + if subDir != "" { + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + realDst = dst + dst = td + } + + u, err := urlhelper.Parse(src) + if err != nil { + return err + } + if force == "" { + force = u.Scheme + } + + g, ok := c.Getters[force] + if !ok { + return fmt.Errorf( + "download not supported for scheme '%s'", force) + } + + // We have magic query parameters that we use to signal different features + q := u.Query() + + // Determine if we have an archive type + archiveV := q.Get("archive") + if archiveV != "" { + // Delete the paramter since it is a magic parameter we don't + // want to pass on to the Getter + q.Del("archive") + u.RawQuery = q.Encode() + + // If we can parse the value as a bool and it is false, then + // set the archive to "-" which should never map to a decompressor + if b, err := strconv.ParseBool(archiveV); err == nil && !b { + archiveV = "-" + } + } + if archiveV == "" { + // We don't appear to... but is it part of the filename? + matchingLen := 0 + for k := range c.Decompressors { + if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { + archiveV = k + matchingLen = len(k) + } + } + } + + // If we have a decompressor, then we need to change the destination + // to download to a temporary path. We unarchive this into the final, + // real path. + var decompressDst string + var decompressDir bool + decompressor := c.Decompressors[archiveV] + if decompressor != nil { + // Create a temporary directory to store our archive. We delete + // this at the end of everything. + td, err := ioutil.TempDir("", "getter") + if err != nil { + return fmt.Errorf( + "Error creating temporary directory for archive: %s", err) + } + defer os.RemoveAll(td) + + // Swap the download directory to be our temporary path and + // store the old values. + decompressDst = dst + decompressDir = mode != ClientModeFile + dst = filepath.Join(td, "archive") + mode = ClientModeFile + } + + // Determine checksum if we have one + checksum, err := c.extractChecksum(u) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) + } + + // Delete the query parameter if we have it. + q.Del("checksum") + u.RawQuery = q.Encode() + + if mode == ClientModeAny { + // Ask the getter which client mode to use + mode, err = g.ClientMode(u) + if err != nil { + return err + } + + // Destination is the base name of the URL path in "any" mode when + // a file source is detected. + if mode == ClientModeFile { + filename := filepath.Base(u.Path) + + // Determine if we have a custom file name + if v := q.Get("filename"); v != "" { + // Delete the query parameter if we have it. + q.Del("filename") + u.RawQuery = q.Encode() + + filename = v + } + + dst = filepath.Join(dst, filename) + } + } + + // If we're not downloading a directory, then just download the file + // and return. 
+ if mode == ClientModeFile { + getFile := true + if checksum != nil { + if err := checksum.checksum(dst); err == nil { + // don't get the file if the checksum of dst is correct + getFile = false + } + } + if getFile { + err := g.GetFile(dst, u) + if err != nil { + return err + } + + if checksum != nil { + if err := checksum.checksum(dst); err != nil { + return err + } + } + } + + if decompressor != nil { + // We have a decompressor, so decompress the current destination + // into the final destination with the proper mode. + err := decompressor.Decompress(decompressDst, dst, decompressDir) + if err != nil { + return err + } + + // Swap the information back + dst = decompressDst + if decompressDir { + mode = ClientModeAny + } else { + mode = ClientModeFile + } + } + + // We check the dir value again because it can be switched back + // if we were unarchiving. If we're still only Get-ing a file, then + // we're done. + if mode == ClientModeFile { + return nil + } + } + + // If we're at this point we're either downloading a directory or we've + // downloaded and unarchived a directory and we're just checking subdir. + // In the case we have a decompressor we don't Get because it was Get + // above. + if decompressor == nil { + // If we're getting a directory, then this is an error. You cannot + // checksum a directory. TODO: test + if checksum != nil { + return fmt.Errorf( + "checksum cannot be specified for directory download") + } + + // We're downloading a directory, which might require a bit more work + // if we're specifying a subdir. + err := g.Get(dst, u) + if err != nil { + err = fmt.Errorf("error downloading '%s': %s", src, err) + return err + } + } + + // If we have a subdir, copy that over + if subDir != "" { + if err := os.RemoveAll(realDst); err != nil { + return err + } + if err := os.MkdirAll(realDst, 0755); err != nil { + return err + } + + // Process any globs + subDir, err := SubdirGlob(dst, subDir) + if err != nil { + return err + } + + return copyDir(c.Ctx, realDst, subDir, false) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go new file mode 100644 index 000000000..7f02509a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_mode.go @@ -0,0 +1,24 @@ +package getter + +// ClientMode is the mode that the client operates in. +type ClientMode uint + +const ( + ClientModeInvalid ClientMode = iota + + // ClientModeAny downloads anything it can. In this mode, dst must + // be a directory. If src is a file, it is saved into the directory + // with the basename of the URL. If src is a directory or archive, + // it is unpacked directly into dst. + ClientModeAny + + // ClientModeFile downloads a single file. In this mode, dst must + // be a file path (doesn't have to exist). src must point to a single + // file. It is saved as dst. + ClientModeFile + + // ClientModeDir downloads a directory. In this mode, dst must be + // a directory path (doesn't have to exist). src must point to an + // archive or directory (such as in s3). + ClientModeDir +) diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go new file mode 100644 index 000000000..c1ee413b0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_option.go @@ -0,0 +1,46 @@ +package getter + +import "context" + +// A ClientOption allows to configure a client +type ClientOption func(*Client) error + +// Configure configures a client with options. 
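+// Defaults are applied for Decompressors, Detectors, Getters and the context
+// when they are unset. Options are usually supplied through the package-level
+// helpers; an illustrative sketch (placeholder URL and path):
+//
+//	ctx := context.Background()
+//	err := GetAny("/tmp/out", "https://example.com/foo.zip", WithContext(ctx))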
+func (c *Client) Configure(opts ...ClientOption) error { + if c.Ctx == nil { + c.Ctx = context.Background() + } + c.Options = opts + for _, opt := range opts { + err := opt(c) + if err != nil { + return err + } + } + // Default decompressor values + if c.Decompressors == nil { + c.Decompressors = Decompressors + } + // Default detector values + if c.Detectors == nil { + c.Detectors = Detectors + } + // Default getter values + if c.Getters == nil { + c.Getters = Getters + } + + for _, getter := range c.Getters { + getter.SetClient(c) + } + return nil +} + +// WithContext allows to pass a context to operation +// in order to be able to cancel a download in progress. +func WithContext(ctx context.Context) func(*Client) error { + return func(c *Client) error { + c.Ctx = ctx + return nil + } +} diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/client_option_progress.go new file mode 100644 index 000000000..9b185f71d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_option_progress.go @@ -0,0 +1,38 @@ +package getter + +import ( + "io" +) + +// WithProgress allows for a user to track +// the progress of a download. +// For example by displaying a progress bar with +// current download. +// Not all getters have progress support yet. +func WithProgress(pl ProgressTracker) func(*Client) error { + return func(c *Client) error { + c.ProgressListener = pl + return nil + } +} + +// ProgressTracker allows to track the progress of downloads. +type ProgressTracker interface { + // TrackProgress should be called when + // a new object is being downloaded. + // src is the location the file is + // downloaded from. + // currentSize is the current size of + // the file in case it is a partial + // download. + // totalSize is the total size in bytes, + // size can be zero if the file size + // is not known. + // stream is the file being downloaded, every + // written byte will add up to processed size. + // + // TrackProgress returns a ReadCloser that wraps the + // download in progress ( stream ). + // When the download is finished, body shall be closed. + TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) +} diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/common.go new file mode 100644 index 000000000..d2afd8ad8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/common.go @@ -0,0 +1,14 @@ +package getter + +import ( + "io/ioutil" +) + +func tmpFile(dir, pattern string) (string, error) { + f, err := ioutil.TempFile(dir, pattern) + if err != nil { + return "", err + } + f.Close() + return f.Name(), nil +} diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go new file mode 100644 index 000000000..641fe6d0f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -0,0 +1,78 @@ +package getter + +import ( + "context" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +// +// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. 
+func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := Copy(ctx, dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go new file mode 100644 index 000000000..198bb0edd --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -0,0 +1,58 @@ +package getter + +import ( + "strings" +) + +// Decompressor defines the interface that must be implemented to add +// support for decompressing a type. +// +// Important: if you're implementing a decompressor, please use the +// containsDotDot helper in this file to ensure that files can't be +// decompressed outside of the specified directory. +type Decompressor interface { + // Decompress should decompress src to dst. dir specifies whether dst + // is a directory or single file. src is guaranteed to be a single file + // that exists. dst is not guaranteed to exist already. + Decompress(dst, src string, dir bool) error +} + +// Decompressors is the mapping of extension to the Decompressor implementation +// that will decompress that extension/type. +var Decompressors map[string]Decompressor + +func init() { + tbzDecompressor := new(TarBzip2Decompressor) + tgzDecompressor := new(TarGzipDecompressor) + txzDecompressor := new(TarXzDecompressor) + + Decompressors = map[string]Decompressor{ + "bz2": new(Bzip2Decompressor), + "gz": new(GzipDecompressor), + "xz": new(XzDecompressor), + "tar.bz2": tbzDecompressor, + "tar.gz": tgzDecompressor, + "tar.xz": txzDecompressor, + "tbz2": tbzDecompressor, + "tgz": tgzDecompressor, + "txz": txzDecompressor, + "zip": new(ZipDecompressor), + } +} + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. +func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." 
{ + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go new file mode 100644 index 000000000..339f4cf7a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go @@ -0,0 +1,45 @@ +package getter + +import ( + "compress/bzip2" + "fmt" + "io" + "os" + "path/filepath" +) + +// Bzip2Decompressor is an implementation of Decompressor that can +// decompress bz2 files. +type Bzip2Decompressor struct{} + +func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("bzip2-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, bzipR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go new file mode 100644 index 000000000..5ebf709b4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -0,0 +1,49 @@ +package getter + +import ( + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" +) + +// GzipDecompressor is an implementation of Decompressor that can +// decompress gzip files. +type GzipDecompressor struct{} + +func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("gzip-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // gzip compression is second + gzipR, err := gzip.NewReader(f) + if err != nil { + return err + } + defer gzipR.Close() + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, gzipR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go new file mode 100644 index 000000000..b6986a25a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -0,0 +1,160 @@ +package getter + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// untar is a shared helper for untarring an archive. The reader should provide +// an uncompressed view of the tar archive. 
+func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + dirHdrs := []*tar.Header{} + now := time.Now() + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + break + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + // Record the directory information so that we may set its attributes + // after all files have been extracted + dirHdrs = append(dirHdrs, hdr) + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + + // Set the access and modification time if valid, otherwise default to current time + aTime := now + mTime := now + if hdr.AccessTime.Unix() > 0 { + aTime = hdr.AccessTime + } + if hdr.ModTime.Unix() > 0 { + mTime = hdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + // Perform a final pass over extracted directories to update metadata + for _, dirHdr := range dirHdrs { + path := filepath.Join(dst, dirHdr.Name) + // Chmod the directory since they might be created before we know the mode flags + if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil { + return err + } + // Set the mtime/atime attributes since they would have been changed during extraction + aTime := now + mTime := now + if dirHdr.AccessTime.Unix() > 0 { + aTime = dirHdr.AccessTime + } + if dirHdr.ModTime.Unix() > 0 { + mTime = dirHdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + return nil +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. 
+type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go new file mode 100644 index 000000000..5391b5c8c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -0,0 +1,33 @@ +package getter + +import ( + "compress/bzip2" + "os" + "path/filepath" +) + +// TarBzip2Decompressor is an implementation of Decompressor that can +// decompress tar.bz2 files. +type TarBzip2Decompressor struct{} + +func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Bzip2 compression is second + bzipR := bzip2.NewReader(f) + return untar(bzipR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go new file mode 100644 index 000000000..b2f662a89 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -0,0 +1,171 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + "time" + + "github.com/mitchellh/go-testing-interface" +) + +// TestDecompressCase is a single test case for testing decompressors +type TestDecompressCase struct { + Input string // Input is the complete path to the input file + Dir bool // Dir is whether or not we're testing directory mode + Err bool // Err is whether we expect an error or not + DirList []string // DirList is the list of files for Dir mode + FileMD5 string // FileMD5 is the expected MD5 for a single file + Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) +} + +// TestDecompressor is a helper function for testing generic decompressors. 
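+//
+// Illustrative sketch (the fixture path and expected listing are placeholders):
+//
+//	cases := []TestDecompressCase{
+//		{
+//			Input:   "testdata/example.tar.gz",
+//			Dir:     true,
+//			DirList: []string{"file1", "file2"},
+//		},
+//	}
+//	TestDecompressor(t, new(TarGzipDecompressor), cases)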
+func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { + t.Helper() + + for _, tc := range cases { + t.Logf("Testing: %s", tc.Input) + + // Temporary dir to store stuff + td, err := ioutil.TempDir("", "getter") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Destination is always joining result so that we have a new path + dst := filepath.Join(td, "subdir", "result") + + // We use a function so defers work + func() { + defer os.RemoveAll(td) + + // Decompress + err := d.Decompress(dst, tc.Input, tc.Dir) + if (err != nil) != tc.Err { + t.Fatalf("err %s: %s", tc.Input, err) + } + if tc.Err { + return + } + + // If it isn't a directory, then check for a single file + if !tc.Dir { + fi, err := os.Stat(dst) + if err != nil { + t.Fatalf("err %s: %s", tc.Input, err) + } + if fi.IsDir() { + t.Fatalf("err %s: expected file, got directory", tc.Input) + } + if tc.FileMD5 != "" { + actual := testMD5(t, dst) + expected := tc.FileMD5 + if actual != expected { + t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual) + } + } + + if tc.Mtime != nil { + actual := fi.ModTime() + if tc.Mtime.Unix() > 0 { + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) + } + } else if actual.Unix() <= 0 { + t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) + } + } + + return + } + + // Convert expected for windows + expected := tc.DirList + if runtime.GOOS == "windows" { + for i, v := range expected { + expected[i] = strings.Replace(v, "/", "\\", -1) + } + } + + // Directory, check for the correct contents + actual := testListDir(t, dst) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) + } + // Check for correct atime/mtime + for _, dir := range actual { + path := filepath.Join(dst, dir) + if tc.Mtime != nil { + fi, err := os.Stat(path) + if err != nil { + t.Fatalf("err: %s", err) + } + actual := fi.ModTime() + if tc.Mtime.Unix() > 0 { + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) + } + } else if actual.Unix() < 0 { + t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) + } + + } + } + }() + } +} + +func testListDir(t testing.T, path string) []string { + var result []string + err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + sub = strings.TrimPrefix(sub, path) + if sub == "" { + return nil + } + sub = sub[1:] // Trim the leading path sep. 
+ + // If it is a dir, add trailing sep + if info.IsDir() { + sub += string(os.PathSeparator) + } + + result = append(result, sub) + return nil + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(result) + return result +} + +func testMD5(t testing.T, path string) string { + f, err := os.Open(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + h := md5.New() + _, err = io.Copy(h, f) + if err != nil { + t.Fatalf("err: %s", err) + } + + result := h.Sum(nil) + return hex.EncodeToString(result) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go new file mode 100644 index 000000000..65eb70dd2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "compress/gzip" + "fmt" + "os" + "path/filepath" +) + +// TarGzipDecompressor is an implementation of Decompressor that can +// decompress tar.gzip files. +type TarGzipDecompressor struct{} + +func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // Gzip compression is second + gzipR, err := gzip.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err) + } + defer gzipR.Close() + + return untar(gzipR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go new file mode 100644 index 000000000..5e151c127 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// TarXzDecompressor is an implementation of Decompressor that can +// decompress tar.xz files. +type TarXzDecompressor struct{} + +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + txzR, err := xz.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) + } + + return untar(txzR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go new file mode 100644 index 000000000..4e37abab1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// XzDecompressor is an implementation of Decompressor that can +// decompress xz files. 
+type XzDecompressor struct{} + +func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("xz-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + xzR, err := xz.NewReader(f) + if err != nil { + return err + } + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, xzR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go new file mode 100644 index 000000000..0830f7914 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -0,0 +1,101 @@ +package getter + +import ( + "archive/zip" + "fmt" + "io" + "os" + "path/filepath" +) + +// ZipDecompressor is an implementation of Decompressor that can +// decompress zip files. +type ZipDecompressor struct{} + +func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // Open the zip + zipR, err := zip.OpenReader(src) + if err != nil { + return err + } + defer zipR.Close() + + // Check the zip integrity + if len(zipR.File) == 0 { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + if !dir && len(zipR.File) > 1 { + return fmt.Errorf("expected a single file: %s", src) + } + + // Go through and unarchive + for _, f := range zipR.File { + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(f.Name) { + return fmt.Errorf("entry contains '..': %s", f.Name) + } + + path = filepath.Join(path, f.Name) + } + + if f.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + continue + } + + // Create the enclosing directories if we must. ZIP files aren't + // required to contain entries for just the directories so this + // can happen. + if dir { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + } + + // Open the file for reading + srcF, err := f.Open() + if err != nil { + return err + } + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + srcF.Close() + return err + } + _, err = io.Copy(dstF, srcF) + srcF.Close() + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, f.Mode()); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go new file mode 100644 index 000000000..5bb750c9f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -0,0 +1,105 @@ +package getter + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/go-getter/helper/url" +) + +// Detector defines the interface that an invalid URL or a URL with a blank +// scheme is passed through in order to determine if its shorthand for +// something else well-known. 
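+//
+// The package-level Detect helper runs a source string through a list of
+// detectors; for example, the GitHub shorthand "github.com/hashicorp/go-getter"
+// becomes "git::https://github.com/hashicorp/go-getter.git":
+//
+//	src, err := Detect("github.com/hashicorp/go-getter", "", Detectors)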
+type Detector interface { + // Detect will detect whether the string matches a known pattern to + // turn it into a proper URL. + Detect(string, string) (string, bool, error) +} + +// Detectors is the list of detectors that are tried on an invalid URL. +// This is also the order they're tried (index 0 is first). +var Detectors []Detector + +func init() { + Detectors = []Detector{ + new(GitHubDetector), + new(GitDetector), + new(BitBucketDetector), + new(S3Detector), + new(GCSDetector), + new(FileDetector), + } +} + +// Detect turns a source string into another source string if it is +// detected to be of a known pattern. +// +// The third parameter should be the list of detectors to use in the +// order to try them. If you don't want to configure this, just use +// the global Detectors variable. +// +// This is safe to be called with an already valid source string: Detect +// will just return it. +func Detect(src string, pwd string, ds []Detector) (string, error) { + getForce, getSrc := getForcedGetter(src) + + // Separate out the subdir if there is one, we don't pass that to detect + getSrc, subDir := SourceDirSubdir(getSrc) + + u, err := url.Parse(getSrc) + if err == nil && u.Scheme != "" { + // Valid URL + return src, nil + } + + for _, d := range ds { + result, ok, err := d.Detect(getSrc, pwd) + if err != nil { + return "", err + } + if !ok { + continue + } + + var detectForce string + detectForce, result = getForcedGetter(result) + result, detectSubdir := SourceDirSubdir(result) + + // If we have a subdir from the detection, then prepend it to our + // requested subdir. + if detectSubdir != "" { + if subDir != "" { + subDir = filepath.Join(detectSubdir, subDir) + } else { + subDir = detectSubdir + } + } + + if subDir != "" { + u, err := url.Parse(result) + if err != nil { + return "", fmt.Errorf("Error parsing URL: %s", err) + } + u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. + u.RawPath = u.Path + + result = u.String() + } + + // Preserve the forced getter if it exists. We try to use the + // original set force first, followed by any force set by the + // detector. + if getForce != "" { + result = fmt.Sprintf("%s::%s", getForce, result) + } else if detectForce != "" { + result = fmt.Sprintf("%s::%s", detectForce, result) + } + + return result, nil + } + + return "", fmt.Errorf("invalid source string: %s", src) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go new file mode 100644 index 000000000..19047eb19 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go @@ -0,0 +1,66 @@ +package getter + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" +) + +// BitBucketDetector implements Detector to detect BitBucket URLs and turn +// them into URLs that the Git or Hg Getter can understand. +type BitBucketDetector struct{} + +func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "bitbucket.org/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { + u, err := url.Parse("https://" + src) + if err != nil { + return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) + } + + // We need to get info on this BitBucket repository to determine whether + // it is Git or Hg. 
+ var info struct { + SCM string `json:"scm"` + } + infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path + resp, err := http.Get(infoUrl) + if err != nil { + return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) + } + if resp.StatusCode == 403 { + // A private repo + return "", true, fmt.Errorf( + "shorthand BitBucket URL can't be used for private repos, " + + "please use a full URL") + } + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&info); err != nil { + return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) + } + + switch info.SCM { + case "git": + if !strings.HasSuffix(u.Path, ".git") { + u.Path += ".git" + } + + return "git::" + u.String(), true, nil + case "hg": + return "hg::" + u.String(), true, nil + default: + return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go new file mode 100644 index 000000000..4ef41ea73 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -0,0 +1,67 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + "runtime" +) + +// FileDetector implements Detector to detect file paths. +type FileDetector struct{} + +func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if !filepath.IsAbs(src) { + if pwd == "" { + return "", true, fmt.Errorf( + "relative paths require a module with a pwd") + } + + // Stat the pwd to determine if its a symbolic link. If it is, + // then the pwd becomes the original directory. Otherwise, + // `filepath.Join` below does some weird stuff. + // + // We just ignore if the pwd doesn't exist. That error will be + // caught later when we try to use the URL. + if fi, err := os.Lstat(pwd); !os.IsNotExist(err) { + if err != nil { + return "", true, err + } + if fi.Mode()&os.ModeSymlink != 0 { + pwd, err = filepath.EvalSymlinks(pwd) + if err != nil { + return "", true, err + } + + // The symlink itself might be a relative path, so we have to + // resolve this to have a correctly rooted URL. + pwd, err = filepath.Abs(pwd) + if err != nil { + return "", true, err + } + } + } + + src = filepath.Join(pwd, src) + } + + return fmtFileURL(src), true, nil +} + +func fmtFileURL(path string) string { + if runtime.GOOS == "windows" { + // Make sure we're using "/" on Windows. URLs are "/"-based. + path = filepath.ToSlash(path) + return fmt.Sprintf("file://%s", path) + } + + // Make sure that we don't start with "/" since we add that below. + if path[0] == '/' { + path = path[1:] + } + return fmt.Sprintf("file:///%s", path) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/vendor/github.com/hashicorp/go-getter/detect_gcs.go new file mode 100644 index 000000000..11363737c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_gcs.go @@ -0,0 +1,43 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GCSDetector implements Detector to detect GCS URLs and turn +// them into URLs that the GCSGetter can understand. 
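+//
+// For example, an input such as "www.googleapis.com/storage/v1/bucket/foo.zip"
+// is rewritten to "gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip".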
+type GCSDetector struct{} + +func (d *GCSDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, "googleapis.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GCSDetector) detectHTTP(src string) (string, bool, error) { + + parts := strings.Split(src, "/") + if len(parts) < 5 { + return "", false, fmt.Errorf( + "URL is not a valid GCS URL") + } + version := parts[2] + bucket := parts[3] + object := strings.Join(parts[4:], "/") + + url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s", + version, bucket, object)) + if err != nil { + return "", false, fmt.Errorf("error parsing GCS URL: %s", err) + } + + return "gcs::" + url.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/detect_git.go new file mode 100644 index 000000000..eeb8a04c5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_git.go @@ -0,0 +1,26 @@ +package getter + +// GitDetector implements Detector to detect Git SSH URLs such as +// git@host.com:dir1/dir2 and converts them to proper URLs. +type GitDetector struct{} + +func (d *GitDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + u, err := detectSSH(src) + if err != nil { + return "", true, err + } + if u == nil { + return "", false, nil + } + + // We require the username to be "git" to assume that this is a Git URL + if u.User.Username() != "git" { + return "", false, nil + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go new file mode 100644 index 000000000..4bf4daf23 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -0,0 +1,47 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GitHubDetector implements Detector to detect GitHub URLs and turn +// them into URLs that the Git Getter can understand. +type GitHubDetector struct{} + +func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "github.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitHub URLs should be github.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) + } + + if !strings.HasSuffix(url.Path, ".git") { + url.Path += ".git" + } + + if len(parts) > 3 { + url.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + url.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go new file mode 100644 index 000000000..8e0f4a03b --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go @@ -0,0 +1,61 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// S3Detector implements Detector to detect S3 URLs and turn +// them into URLs that the S3 getter can understand. 
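+//
+// Both path-style and virtual-hosted-style addresses are handled; for example,
+// "bucket.s3-eu-west-1.amazonaws.com/foo" is rewritten to
+// "s3::https://s3-eu-west-1.amazonaws.com/bucket/foo".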
+type S3Detector struct{} + +func (d *S3Detector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, ".amazonaws.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *S3Detector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 2 { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } + + hostParts := strings.Split(parts[0], ".") + if len(hostParts) == 3 { + return d.detectPathStyle(hostParts[0], parts[1:]) + } else if len(hostParts) == 4 { + return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:]) + } else { + return "", false, fmt.Errorf( + "URL is not a valid S3 URL") + } +} + +func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} + +func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) { + urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", false, fmt.Errorf("error parsing S3 URL: %s", err) + } + + return "s3::" + url.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/detect_ssh.go new file mode 100644 index 000000000..c0dbe9d47 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_ssh.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "net/url" + "regexp" + "strings" +) + +// Note that we do not have an SSH-getter currently so this file serves +// only to hold the detectSSH helper that is used by other detectors. + +// sshPattern matches SCP-like SSH patterns (user@host:path) +var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$") + +// detectSSH determines if the src string matches an SSH-like URL and +// converts it into a net.URL compatible string. This returns nil if the +// string doesn't match the SSH pattern. +// +// This function is tested indirectly via detect_git_test.go +func detectSSH(src string) (*url.URL, error) { + matched := sshPattern.FindStringSubmatch(src) + if matched == nil { + return nil, nil + } + + user := matched[1] + host := matched[2] + path := matched[3] + qidx := strings.Index(path, "?") + if qidx == -1 { + qidx = len(path) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User(user) + u.Host = host + u.Path = path[0:qidx] + if qidx < len(path) { + q, err := url.ParseQuery(path[qidx+1:]) + if err != nil { + return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + u.RawQuery = q.Encode() + } + + return &u, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go new file mode 100644 index 000000000..647ccf459 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go @@ -0,0 +1,65 @@ +package getter + +import ( + "crypto/md5" + "encoding/hex" + "fmt" + "os" + "path/filepath" +) + +// FolderStorage is an implementation of the Storage interface that manages +// modules on the disk. +type FolderStorage struct { + // StorageDir is the directory where the modules will be stored. 
+ StorageDir string +} + +// Dir implements Storage.Dir +func (s *FolderStorage) Dir(key string) (d string, e bool, err error) { + d = s.dir(key) + _, err = os.Stat(d) + if err == nil { + // Directory exists + e = true + return + } + if os.IsNotExist(err) { + // Directory doesn't exist + d = "" + e = false + err = nil + return + } + + // An error + d = "" + e = false + return +} + +// Get implements Storage.Get +func (s *FolderStorage) Get(key string, source string, update bool) error { + dir := s.dir(key) + if !update { + if _, err := os.Stat(dir); err == nil { + // If the directory already exists, then we're done since + // we're not updating. + return nil + } else if !os.IsNotExist(err) { + // If the error we got wasn't a file-not-exist error, then + // something went wrong and we should report it. + return fmt.Errorf("Error reading module directory: %s", err) + } + } + + // Get the source. This always forces an update. + return Get(dir, source) +} + +// dir returns the directory name internally that we'll use to map to +// internally. +func (s *FolderStorage) dir(key string) string { + sum := md5.Sum([]byte(key)) + return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:])) +} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go new file mode 100644 index 000000000..c233763c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -0,0 +1,152 @@ +// getter is a package for downloading files or directories from a variety of +// protocols. +// +// getter is unique in its ability to download both directories and files. +// It also detects certain source strings to be protocol-specific URLs. For +// example, "github.com/hashicorp/go-getter" would turn into a Git URL and +// use the Git protocol. +// +// Protocols and detectors are extensible. +// +// To get started, see Client. +package getter + +import ( + "bytes" + "fmt" + "net/url" + "os/exec" + "regexp" + "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +// Getter defines the interface that schemes must implement to download +// things. +type Getter interface { + // Get downloads the given URL into the given directory. This always + // assumes that we're updating and gets the latest version that it can. + // + // The directory may already exist (if we're updating). If it is in a + // format that isn't understood, an error should be returned. Get shouldn't + // simply nuke the directory. + Get(string, *url.URL) error + + // GetFile downloads the give URL into the given path. The URL must + // reference a single file. If possible, the Getter should check if + // the remote end contains the same file and no-op this operation. + GetFile(string, *url.URL) error + + // ClientMode returns the mode based on the given URL. This is used to + // allow clients to let the getters decide which mode to use. + ClientMode(*url.URL) (ClientMode, error) + + // SetClient allows a getter to know it's client + // in order to access client's Get functions or + // progress tracking. + SetClient(*Client) +} + +// Getters is the mapping of scheme to the Getter implementation that will +// be used to get a dependency. +var Getters map[string]Getter + +// forcedRegexp is the regular expression that finds forced getters. This +// syntax is schema::url, example: git::https://foo.com +var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) + +// httpClient is the default client to be used by HttpGetters. 
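FolderStorage above maps an opaque key to an on-disk directory by MD5-hashing the key, and only re-downloads when the directory is missing or an update is requested. A rough sketch of how it might be used follows (illustrative only, not part of the vendored patch; the cache directory and key are hypothetical).

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	s := &getter.FolderStorage{StorageDir: "/tmp/go-getter-cache"} // hypothetical cache dir

	// Download (or reuse) the source under the MD5-hashed key.
	if err := s.Get("example-module", "github.com/hashicorp/go-getter", false); err != nil {
		log.Fatal(err)
	}

	// Dir reports where the key is stored and whether it exists yet.
	dir, ok, err := s.Dir("example-module")
	fmt.Println(dir, ok, err)
}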
+var httpClient = cleanhttp.DefaultClient() + +func init() { + httpGetter := &HttpGetter{ + Netrc: true, + } + + Getters = map[string]Getter{ + "file": new(FileGetter), + "git": new(GitGetter), + "gcs": new(GCSGetter), + "hg": new(HgGetter), + "s3": new(S3Getter), + "http": httpGetter, + "https": httpGetter, + } +} + +// Get downloads the directory specified by src into the folder specified by +// dst. If dst already exists, Get will attempt to update it. +// +// src is a URL, whereas dst is always just a file path to a folder. This +// folder doesn't need to exist. It will be created if it doesn't exist. +func Get(dst, src string, opts ...ClientOption) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: true, + Options: opts, + }).Get() +} + +// GetAny downloads a URL into the given destination. Unlike Get or +// GetFile, both directories and files are supported. +// +// dst must be a directory. If src is a file, it will be downloaded +// into dst with the basename of the URL. If src is a directory or +// archive, it will be unpacked directly into dst. +func GetAny(dst, src string, opts ...ClientOption) error { + return (&Client{ + Src: src, + Dst: dst, + Mode: ClientModeAny, + Options: opts, + }).Get() +} + +// GetFile downloads the file specified by src into the path specified by +// dst. +func GetFile(dst, src string, opts ...ClientOption) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: false, + Options: opts, + }).Get() +} + +// getRunCommand is a helper that will run a command and capture the output +// in the case an error happens. +func getRunCommand(cmd *exec.Cmd) error { + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + err := cmd.Run() + if err == nil { + return nil + } + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return fmt.Errorf( + "%s exited with %d: %s", + cmd.Path, + status.ExitStatus(), + buf.String()) + } + } + + return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) +} + +// getForcedGetter takes a source and returns the tuple of the forced +// getter and the raw URL (without the force syntax). +func getForcedGetter(src string) (string, string) { + var forced string + if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { + forced = ms[1] + src = ms[2] + } + + return forced, src +} diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go new file mode 100644 index 000000000..09e9b6313 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_base.go @@ -0,0 +1,20 @@ +package getter + +import "context" + +// getter is our base getter; it regroups +// fields all getters have in common. +type getter struct { + client *Client +} + +func (g *getter) SetClient(c *Client) { g.client = c } + +// Context tries to returns the Contex from the getter's +// client. otherwise context.Background() is returned. +func (g *getter) Context() context.Context { + if g == nil || g.client == nil { + return context.Background() + } + return g.client.Ctx +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go new file mode 100644 index 000000000..78660839a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file.go @@ -0,0 +1,36 @@ +package getter + +import ( + "net/url" + "os" +) + +// FileGetter is a Getter implementation that will download a module from +// a file scheme. 
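The package-level Get, GetAny, and GetFile helpers above are the usual entry points, and getForcedGetter strips an optional scheme::url prefix so a caller can pin a specific getter regardless of what the detectors would choose. A hedged sketch (destination paths and the download URL are hypothetical, not part of the vendored patch):

package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Force the git getter explicitly with the "git::" prefix.
	if err := getter.Get("./repo", "git::https://github.com/hashicorp/go-getter.git"); err != nil {
		log.Fatal(err)
	}

	// Let the URL scheme pick the getter: "https" maps to the HttpGetter
	// registered in init above.
	if err := getter.GetFile("./archive.zip", "https://example.com/archive.zip"); err != nil {
		log.Fatal(err)
	}
}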
+type FileGetter struct { + getter + + // Copy, if set to true, will copy data instead of using a symlink. If + // false, attempts to symlink to speed up the operation and to lower the + // disk space usage. If the symlink fails, may attempt to copy on windows. + Copy bool +} + +func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + fi, err := os.Stat(path) + if err != nil { + return 0, err + } + + // Check if the source is a directory. + if fi.IsDir() { + return ClientModeDir, nil + } + + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go new file mode 100644 index 000000000..d70fb4951 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_copy.go @@ -0,0 +1,29 @@ +package getter + +import ( + "context" + "io" +) + +// readerFunc is syntactic sugar for read interface. +type readerFunc func(p []byte) (n int, err error) + +func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } + +// Copy is a io.Copy cancellable by context +func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) { + // Copy will call the Reader and Writer interface multiple time, in order + // to copy by chunk (avoiding loading the whole file in memory). + return io.Copy(dst, readerFunc(func(p []byte) (int, error) { + + select { + case <-ctx.Done(): + // context has been canceled + // stop process and propagate "context canceled" error + return 0, ctx.Err() + default: + // otherwise just run default io.Reader implementation + return src.Read(p) + } + })) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go new file mode 100644 index 000000000..c3b28ae51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -0,0 +1,103 @@ +// +build !windows + +package getter + +import ( + "fmt" + "net/url" + "os" + "path/filepath" +) + +func (g *FileGetter) Get(dst string, u *url.URL) error { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a directory to be usable. + if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + + fi, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + mode := fi.Mode() + if mode&os.ModeSymlink == 0 { + return fmt.Errorf("destination exists and is not a symlink") + } + + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + return os.Symlink(path, dst) +} + +func (g *FileGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a file to be usable. 
+ if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + return os.Symlink(path, dst) + } + + // Copy + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = Copy(ctx, dstF, srcF) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go new file mode 100644 index 000000000..24f1acb17 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go @@ -0,0 +1,136 @@ +// +build windows + +package getter + +import ( + "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +func (g *FileGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a directory to be usable. + if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + + fi, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + mode := fi.Mode() + if mode&os.ModeSymlink == 0 { + return fmt.Errorf("destination exists and is not a symlink") + } + + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + sourcePath := toBackslash(path) + + // Use mklink to create a junction point + output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) + } + + return nil +} + +func (g *FileGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a directory to be usable. 
+ if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + if err = os.Symlink(path, dst); err == nil { + return err + } + lerr, ok := err.(*os.LinkError) + if !ok { + return err + } + switch lerr.Err { + case syscall.ERROR_PRIVILEGE_NOT_HELD: + // no symlink privilege, let's + // fallback to a copy to avoid an error. + break + default: + return err + } + } + + // Copy + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = Copy(ctx, dstF, srcF) + return err +} + +// toBackslash returns the result of replacing each slash character +// in path with a backslash ('\') character. Multiple separators are +// replaced by multiple backslashes. +func toBackslash(path string) string { + return strings.Replace(path, "/", "\\", -1) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/get_gcs.go new file mode 100644 index 000000000..6faa70f4f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_gcs.go @@ -0,0 +1,172 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" +) + +// GCSGetter is a Getter implementation that will download a module from +// a GCS bucket. +type GCSGetter struct { + getter +} + +func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return 0, err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return 0, err + } + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return 0, err + } + + if err == iterator.Done { + break + } + if strings.HasSuffix(obj.Name, "/") { + // A directory matched the prefix search, so this must be a directory + return ClientModeDir, nil + } else if obj.Name != object { + // A file matched the prefix search and doesn't have the same name + // as the query, so this must be a directory + return ClientModeDir, nil + } + } + // There are no directories or subdirectories, and if a match was returned, + // it was exactly equal to the prefix search. 
So return File mode + return ClientModeFile, nil +} + +func (g *GCSGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + // Remove destination if it already exists + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // Remove the destination + if err := os.RemoveAll(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + + // Iterate through all matching objects. + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return err + } + if err == iterator.Done { + break + } + + if !strings.HasSuffix(obj.Name, "/") { + // Get the object destination path + objDst, err := filepath.Rel(object, obj.Name) + if err != nil { + return err + } + objDst = filepath.Join(dst, objDst) + // Download the matching object. + err = g.getObject(ctx, client, objDst, bucket, obj.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (g *GCSGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + return g.getObject(ctx, client, dst, bucket, object) +} + +func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error { + rc, err := client.Bucket(bucket).Object(object).NewReader(ctx) + if err != nil { + return err + } + defer rc.Close() + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = Copy(ctx, f, rc) + return err +} + +func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) { + if strings.Contains(u.Host, "googleapis.com") { + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + + pathParts := strings.SplitN(u.Path, "/", 5) + if len(pathParts) != 5 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + bucket = pathParts[3] + path = pathParts[4] + } + return +} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go new file mode 100644 index 000000000..bb1ec316d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -0,0 +1,293 @@ +package getter + +import ( + "context" + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" + version "github.com/hashicorp/go-version" +) + +// GitGetter is a Getter implementation that will download a module from +// a git repository. 
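parseURL above expects object URLs of the form https://www.googleapis.com/storage/v1/<bucket>/<prefix>, which is exactly what the GCSDetector earlier produces behind a gcs:: prefix. A sketch of fetching a prefix through the registered gcs getter (illustrative only; the bucket and prefix are hypothetical, and storage.NewClient relies on ambient Google application credentials):

package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Downloads every object under the prefix into ./data, mirroring the
	// object iteration in GCSGetter.Get above.
	src := "gcs::https://www.googleapis.com/storage/v1/example-bucket/team/config"
	if err := getter.Get("./data", src); err != nil {
		log.Fatal(err)
	}
}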
+type GitGetter struct { + getter +} + +func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *GitGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + if _, err := exec.LookPath("git"); err != nil { + return fmt.Errorf("git must be available and on the PATH") + } + + // The port number must be parseable as an integer. If not, the user + // was probably trying to use a scp-style address, in which case the + // ssh:// prefix must be removed to indicate that. + // + // This is not necessary in versions of Go which have patched + // CVE-2019-14809 (e.g. Go 1.12.8+) + if portStr := u.Port(); portStr != "" { + if _, err := strconv.ParseUint(portStr, 10, 16); err != nil { + return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr) + } + } + + // Extract some query parameters we use + var ref, sshKey string + var depth int + q := u.Query() + if len(q) > 0 { + ref = q.Get("ref") + q.Del("ref") + + sshKey = q.Get("sshkey") + q.Del("sshkey") + + if n, err := strconv.Atoi(q.Get("depth")); err == nil { + depth = n + } + q.Del("depth") + + // Copy the URL + var newU url.URL = *u + u = &newU + u.RawQuery = q.Encode() + } + + var sshKeyFile string + if sshKey != "" { + // Check that the git version is sufficiently new. + if err := checkGitVersion("2.3"); err != nil { + return fmt.Errorf("Error using ssh key: %v", err) + } + + // We have an SSH key - decode it. + raw, err := base64.StdEncoding.DecodeString(sshKey) + if err != nil { + return err + } + + // Create a temp file for the key and ensure it is removed. + fh, err := ioutil.TempFile("", "go-getter") + if err != nil { + return err + } + sshKeyFile = fh.Name() + defer os.Remove(sshKeyFile) + + // Set the permissions prior to writing the key material. + if err := os.Chmod(sshKeyFile, 0600); err != nil { + return err + } + + // Write the raw key into the temp file. + _, err = fh.Write(raw) + fh.Close() + if err != nil { + return err + } + } + + // Clone or update the repository + _, err := os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + err = g.update(ctx, dst, sshKeyFile, ref, depth) + } else { + err = g.clone(ctx, dst, sshKeyFile, u, depth) + } + if err != nil { + return err + } + + // Next: check out the proper tag/branch if it is specified, and checkout + if ref != "" { + if err := g.checkout(dst, ref); err != nil { + return err + } + } + + // Lastly, download any/all submodules. + return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) +} + +// GetFile for Git doesn't support updating at this time. It will download +// the file every time. +func (g *GitGetter) GetFile(dst string, u *url.URL) error { + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. 
+ filename := filepath.Base(u.Path) + u.Path = filepath.Dir(u.Path) + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *GitGetter) checkout(dst string, ref string) error { + cmd := exec.Command("git", "checkout", ref) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error { + args := []string{"clone"} + + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + + args = append(args, u.String(), dst) + cmd := exec.CommandContext(ctx, "git", args...) + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error { + // Determine if we're a branch. If we're NOT a branch, then we just + // switch to master prior to checking out + cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd.Dir = dst + + if getRunCommand(cmd) != nil { + // Not a branch, switch to master. This will also catch non-existent + // branches, in which case we want to switch to master and then + // checkout the proper branch later. + ref = "master" + } + + // We have to be on a branch to pull + if err := g.checkout(dst, ref); err != nil { + return err + } + + if depth > 0 { + cmd = exec.Command("git", "pull", "--depth", strconv.Itoa(depth), "--ff-only") + } else { + cmd = exec.Command("git", "pull", "--ff-only") + } + + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// fetchSubmodules downloads any configured submodules recursively. +func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error { + args := []string{"submodule", "update", "--init", "--recursive"} + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + cmd := exec.CommandContext(ctx, "git", args...) + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// setupGitEnv sets up the environment for the given command. This is used to +// pass configuration data to git and ssh and enables advanced cloning methods. +func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. + env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } + + if sshKeyFile != "" { + // We have an SSH key temp file configured, tell ssh about this. + if runtime.GOOS == "windows" { + sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1) + } + sshCmd = append(sshCmd, "-i", sshKeyFile) + } + + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env +} + +// checkGitVersion is used to check the version of git installed on the system +// against a known minimum version. Returns an error if the installed version +// is older than the given minimum. 
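GitGetter.Get above recognizes three query parameters on the source URL: ref (the branch, tag, or commit to check out), depth (shallow clone/pull depth), and sshkey (a base64-encoded private key, which requires git 2.3 or newer per checkGitVersion below). A hedged sketch using the first two (illustrative only; the destination path and ref are hypothetical):

package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Shallow-clone a single ref; GitGetter strips the ref and depth
	// parameters from the URL before handing it to `git clone`.
	src := "git::https://github.com/hashicorp/go-getter.git?ref=v1.4.0&depth=1"
	if err := getter.Get("./go-getter-src", src); err != nil {
		log.Fatal(err)
	}
}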
+func checkGitVersion(min string) error { + want, err := version.NewVersion(min) + if err != nil { + return err + } + + out, err := exec.Command("git", "version").Output() + if err != nil { + return err + } + + fields := strings.Fields(string(out)) + if len(fields) < 3 { + return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) + } + v := fields[2] + if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") { + // on windows, git version will return for example: + // git version 2.20.1.windows.1 + // Which does not follow the semantic versionning specs + // https://semver.org. We remove that part in order for + // go-version to not error. + v = v[:strings.Index(v, ".windows.")] + } + + have, err := version.NewVersion(v) + if err != nil { + return err + } + + if have.LessThan(want) { + return fmt.Errorf("Required git version = %s, have %s", want, have) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go new file mode 100644 index 000000000..290649c91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -0,0 +1,135 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" +) + +// HgGetter is a Getter implementation that will download a module from +// a Mercurial repository. +type HgGetter struct { + getter +} + +func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *HgGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + if _, err := exec.LookPath("hg"); err != nil { + return fmt.Errorf("hg must be available and on the PATH") + } + + newURL, err := urlhelper.Parse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + + // Extract some query parameters we use + var rev string + q := newURL.Query() + if len(q) > 0 { + rev = q.Get("rev") + q.Del("rev") + + newURL.RawQuery = q.Encode() + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err != nil { + if err := g.clone(dst, newURL); err != nil { + return err + } + } + + if err := g.pull(dst, newURL); err != nil { + return err + } + + return g.update(ctx, dst, newURL, rev) +} + +// GetFile for Hg doesn't support updating at this time. It will download +// the file every time. +func (g *HgGetter) GetFile(dst string, u *url.URL) error { + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. 
+ filename := filepath.Base(u.Path) + u.Path = filepath.ToSlash(filepath.Dir(u.Path)) + + // If we're on Windows, we need to set the host to "localhost" for hg + if runtime.GOOS == "windows" { + u.Host = "localhost" + } + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true, getter: g.getter} + return fg.GetFile(dst, u) +} + +func (g *HgGetter) clone(dst string, u *url.URL) error { + cmd := exec.Command("hg", "clone", "-U", u.String(), dst) + return getRunCommand(cmd) +} + +func (g *HgGetter) pull(dst string, u *url.URL) error { + cmd := exec.Command("hg", "pull") + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error { + args := []string{"update"} + if rev != "" { + args = append(args, rev) + } + + cmd := exec.CommandContext(ctx, "hg", args...) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func fixWindowsDrivePath(u *url.URL) bool { + // hg assumes a file:/// prefix for Windows drive letter file paths. + // (e.g. file:///c:/foo/bar) + // If the URL Path does not begin with a '/' character, the resulting URL + // path will have a file:// prefix. (e.g. file://c:/foo/bar) + // See http://www.selenic.com/hg/help/urls and the examples listed in + // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936 + return runtime.GOOS == "windows" && u.Scheme == "file" && + len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':' +} diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go new file mode 100644 index 000000000..7c4541c6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -0,0 +1,322 @@ +package getter + +import ( + "context" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + + safetemp "github.com/hashicorp/go-safetemp" +) + +// HttpGetter is a Getter implementation that will download from an HTTP +// endpoint. +// +// For file downloads, HTTP is used directly. +// +// The protocol for downloading a directory from an HTTP endpoint is as follows: +// +// An HTTP GET request is made to the URL with the additional GET parameter +// "terraform-get=1". This lets you handle that scenario specially if you +// wish. The response must be a 2xx. +// +// First, a header is looked for "X-Terraform-Get" which should contain +// a source URL to download. +// +// If the header is not present, then a meta tag is searched for named +// "terraform-get" and the content should be a source URL. +// +// The source URL, whether from the header or meta tag, must be a fully +// formed URL. The shorthand syntax of "github.com/foo/bar" or relative +// paths are not allowed. +type HttpGetter struct { + getter + + // Netrc, if true, will lookup and use auth information found + // in the user's netrc file if available. + Netrc bool + + // Client is the http.Client to use for Get requests. + // This defaults to a cleanhttp.DefaultClient if left unset. + Client *http.Client + + // Header contains optional request header fields that should be included + // with every HTTP request. Note that the zero value of this field is nil, + // and as such it needs to be initialized before use, via something like + // make(http.Header). 
+ Header http.Header +} + +func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { + if strings.HasSuffix(u.Path, "/") { + return ClientModeDir, nil + } + return ClientModeFile, nil +} + +func (g *HttpGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + // Copy the URL so we can modify it + var newU url.URL = *u + u = &newU + + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(u); err != nil { + return err + } + } + + if g.Client == nil { + g.Client = httpClient + } + + // Add terraform-get to the parameter. + q := u.Query() + q.Add("terraform-get", "1") + u.RawQuery = q.Encode() + + // Get the URL + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return err + } + + req.Header = g.Header + resp, err := g.Client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + // Extract the source URL + var source string + if v := resp.Header.Get("X-Terraform-Get"); v != "" { + source = v + } else { + source, err = g.parseMeta(resp.Body) + if err != nil { + return err + } + } + if source == "" { + return fmt.Errorf("no source URL was returned") + } + + // If there is a subdir component, then we download the root separately + // into a temporary directory, then copy over the proper subdir. + source, subDir := SourceDirSubdir(source) + if subDir == "" { + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + return Get(dst, source, opts...) + } + + // We have a subdir, time to jump some hoops + return g.getSubdir(ctx, dst, source, subDir) +} + +func (g *HttpGetter) GetFile(dst string, src *url.URL) error { + ctx := g.Context() + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(src); err != nil { + return err + } + } + + // Create all the parent directories if needed + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) + if err != nil { + return err + } + defer f.Close() + + if g.Client == nil { + g.Client = httpClient + } + + var currentFileSize int64 + + // We first make a HEAD request so we can check + // if the server supports range queries. If the server/URL doesn't + // support HEAD requests, we just fall back to GET. + req, err := http.NewRequest("HEAD", src.String(), nil) + if err != nil { + return err + } + if g.Header != nil { + req.Header = g.Header + } + headResp, err := g.Client.Do(req) + if err == nil && headResp != nil { + headResp.Body.Close() + if headResp.StatusCode == 200 { + // If the HEAD request succeeded, then attempt to set the range + // query if we can. 
+ if headResp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := f.Stat(); err == nil { + if _, err = f.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + currentFileSize = fi.Size() + totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) + if currentFileSize >= totalFileSize { + // file already present + return nil + } + } + } + } + } + } + req.Method = "GET" + + resp, err := g.Client.Do(req) + if err != nil { + return err + } + switch resp.StatusCode { + case http.StatusOK, http.StatusPartialContent: + // all good + default: + resp.Body.Close() + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + body := resp.Body + + if g.client != nil && g.client.ProgressListener != nil { + // track download + fn := filepath.Base(src.EscapedPath()) + body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) + } + defer body.Close() + + n, err := Copy(ctx, f, body) + if err == nil && n < resp.ContentLength { + err = io.ErrShortWrite + } + return err +} + +// getSubdir downloads the source into the destination, but with +// the proper subdir. +func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) error { + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") + if err != nil { + return err + } + defer tdcloser.Close() + + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + // Download that into the given directory + if err := Get(td, source, opts...); err != nil { + return err + } + + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { + return err + } + + // Make sure the subdir path actually exists + if _, err := os.Stat(sourcePath); err != nil { + return fmt.Errorf( + "Error downloading %s: %s", source, err) + } + + // Copy the subdirectory into our actual destination. + if err := os.RemoveAll(dst); err != nil { + return err + } + + // Make the final destination + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + return copyDir(ctx, dst, sourcePath, false) +} + +// parseMeta looks for the first meta tag in the given reader that +// will give us the source URL. +func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var err error + var t xml.Token + for { + t, err = d.Token() + if err != nil { + if err == io.EOF { + err = nil + } + return "", err + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + return "", nil + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + return "", nil + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "terraform-get" { + continue + } + if f := attrValue(e.Attr, "content"); f != "" { + return f, nil + } + } +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. +func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} + +// charsetReader returns a reader for the given charset. Currently +// it only supports UTF-8 and ASCII. 
Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). +func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go new file mode 100644 index 000000000..e2a98ea28 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -0,0 +1,54 @@ +package getter + +import ( + "net/url" +) + +// MockGetter is an implementation of Getter that can be used for tests. +type MockGetter struct { + getter + + // Proxy, if set, will be called after recording the calls below. + // If it isn't set, then the *Err values will be returned. + Proxy Getter + + GetCalled bool + GetDst string + GetURL *url.URL + GetErr error + + GetFileCalled bool + GetFileDst string + GetFileURL *url.URL + GetFileErr error +} + +func (g *MockGetter) Get(dst string, u *url.URL) error { + g.GetCalled = true + g.GetDst = dst + g.GetURL = u + + if g.Proxy != nil { + return g.Proxy.Get(dst, u) + } + + return g.GetErr +} + +func (g *MockGetter) GetFile(dst string, u *url.URL) error { + g.GetFileCalled = true + g.GetFileDst = dst + g.GetFileURL = u + + if g.Proxy != nil { + return g.Proxy.GetFile(dst, u) + } + return g.GetFileErr +} + +func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { + if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { + return ClientModeDir, nil + } + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go new file mode 100644 index 000000000..93eeb0b81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -0,0 +1,275 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3Getter is a Getter implementation that will download a module from +// a S3 bucket. +type S3Getter struct { + getter +} + +func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { + // Parse URL + region, bucket, path, _, creds, err := g.parseUrl(u) + if err != nil { + return 0, err + } + + // Create client config + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + + // List the object(s) at the given prefix + req := &s3.ListObjectsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(path), + } + resp, err := client.ListObjects(req) + if err != nil { + return 0, err + } + + for _, o := range resp.Contents { + // Use file mode on exact match. + if *o.Key == path { + return ClientModeFile, nil + } + + // Use dir mode if child keys are found. + if strings.HasPrefix(*o.Key, path+"/") { + return ClientModeDir, nil + } + } + + // There was no match, so just return file mode. The download is going + // to fail but we will let S3 return the proper error later. 
+ return ClientModeFile, nil +} + +func (g *S3Getter) Get(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + region, bucket, path, _, creds, err := g.parseUrl(u) + if err != nil { + return err + } + + // Remove destination if it already exists + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + if err == nil { + // Remove the destination + if err := os.RemoveAll(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + + // List files in path, keep listing until no more objects are found + lastMarker := "" + hasMore := true + for hasMore { + req := &s3.ListObjectsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(path), + } + if lastMarker != "" { + req.Marker = aws.String(lastMarker) + } + + resp, err := client.ListObjects(req) + if err != nil { + return err + } + + hasMore = aws.BoolValue(resp.IsTruncated) + + // Get each object storing each file relative to the destination path + for _, object := range resp.Contents { + lastMarker = aws.StringValue(object.Key) + objPath := aws.StringValue(object.Key) + + // If the key ends with a backslash assume it is a directory and ignore + if strings.HasSuffix(objPath, "/") { + continue + } + + // Get the object destination path + objDst, err := filepath.Rel(path, objPath) + if err != nil { + return err + } + objDst = filepath.Join(dst, objDst) + + if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { + return err + } + } + } + + return nil +} + +func (g *S3Getter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + region, bucket, path, version, creds, err := g.parseUrl(u) + if err != nil { + return err + } + + config := g.getAWSConfig(region, u, creds) + sess := session.New(config) + client := s3.New(sess) + return g.getObject(ctx, client, dst, bucket, path, version) +} + +func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { + req := &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + if version != "" { + req.VersionId = aws.String(version) + } + + resp, err := client.GetObject(req) + if err != nil { + return err + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = Copy(ctx, f, resp.Body) + return err +} + +func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { + conf := &aws.Config{} + if creds == nil { + // Grab the metadata URL + metadataURL := os.Getenv("AWS_METADATA_URL") + if metadataURL == "" { + metadataURL = "http://169.254.169.254:80/latest" + } + + creds = credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(session.New(&aws.Config{ + Endpoint: aws.String(metadataURL), + })), + }, + }) + } + + if creds != nil { + conf.Endpoint = &url.Host + conf.S3ForcePathStyle = aws.Bool(true) + if url.Scheme == "http" { + conf.DisableSSL = aws.Bool(true) + } + } + + conf.Credentials = creds + if region != "" { + conf.Region = aws.String(region) + } + + return conf +} + +func (g *S3Getter) 
parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { + // This just check whether we are dealing with S3 or + // any other S3 compliant service. S3 has a predictable + // url as others do not + if strings.Contains(u.Host, "amazonaws.com") { + // Expected host style: s3.amazonaws.com. They always have 3 parts, + // although the first may differ if we're accessing a specific region. + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } + + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + + } else { + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 complaint URL") + return + } + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + region = u.Query().Get("region") + if region == "" { + region = "us-east-1" + } + } + + _, hasAwsId := u.Query()["aws_access_key_id"] + _, hasAwsSecret := u.Query()["aws_access_key_secret"] + _, hasAwsToken := u.Query()["aws_access_token"] + if hasAwsId || hasAwsSecret || hasAwsToken { + creds = credentials.NewStaticCredentials( + u.Query().Get("aws_access_key_id"), + u.Query().Get("aws_access_key_secret"), + u.Query().Get("aws_access_token"), + ) + } + + return +} diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/go.mod new file mode 100644 index 000000000..a869e8f80 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.mod @@ -0,0 +1,23 @@ +module github.com/hashicorp/go-getter + +require ( + cloud.google.com/go v0.45.1 + github.com/aws/aws-sdk-go v1.15.78 + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d + github.com/cheggaaa/pb v1.0.27 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fatih/color v1.7.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-safetemp v1.0.0 + github.com/hashicorp/go-version v1.1.0 + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mitchellh/go-homedir v1.0.0 + github.com/mitchellh/go-testing-interface v1.0.0 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/ulikunitz/xz v0.5.5 + google.golang.org/api v0.9.0 + gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect +) diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/go.sum new file mode 100644 index 000000000..b88c747ce --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.sum @@ -0,0 +1,162 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= 
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go new file mode 100644 index 000000000..02497c254 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go @@ -0,0 +1,14 @@ +package url + +import ( + "net/url" +) + +// Parse parses rawURL into a URL structure. +// The rawURL may be relative or absolute. +// +// Parse is a wrapper for the Go stdlib net/url Parse function, but returns +// Windows "safe" URLs on Windows platforms. +func Parse(rawURL string) (*url.URL, error) { + return parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go new file mode 100644 index 000000000..ed1352a91 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package url + +import ( + "net/url" +) + +func parse(rawURL string) (*url.URL, error) { + return url.Parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go new file mode 100644 index 000000000..4280ec59a --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go @@ -0,0 +1,39 @@ +package url + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" +) + +func parse(rawURL string) (*url.URL, error) { + // Make sure we're using "/" since URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter. In which case we + // force the 'file' scheme to avoid "net/url" URL.String() prepending + // our url with "./". + rawURL = "file://" + rawURL + } + + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + + // Remove leading slash for absolute file paths. 
+ if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { + u.Path = u.Path[1:] + } + + return u, err +} diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go new file mode 100644 index 000000000..c7f6a3fb3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/netrc.go @@ -0,0 +1,67 @@ +package getter + +import ( + "fmt" + "net/url" + "os" + "runtime" + + "github.com/bgentry/go-netrc/netrc" + "github.com/mitchellh/go-homedir" +) + +// addAuthFromNetrc adds auth information to the URL from the user's +// netrc file if it can be found. This will only add the auth info +// if the URL doesn't already have auth info specified and the +// the username is blank. +func addAuthFromNetrc(u *url.URL) error { + // If the URL already has auth information, do nothing + if u.User != nil && u.User.Username() != "" { + return nil + } + + // Get the netrc file path + path := os.Getenv("NETRC") + if path == "" { + filename := ".netrc" + if runtime.GOOS == "windows" { + filename = "_netrc" + } + + var err error + path, err = homedir.Expand("~/" + filename) + if err != nil { + return err + } + } + + // If the file is not a file, then do nothing + if fi, err := os.Stat(path); err != nil { + // File doesn't exist, do nothing + if os.IsNotExist(err) { + return nil + } + + // Some other error! + return err + } else if fi.IsDir() { + // File is directory, ignore + return nil + } + + // Load up the netrc file + net, err := netrc.ParseFile(path) + if err != nil { + return fmt.Errorf("Error parsing netrc file at %q: %s", path, err) + } + + machine := net.FindMachine(u.Host) + if machine == nil { + // Machine not found, no problem + return nil + } + + // Set the user info + u.User = url.UserPassword(machine.Login, machine.Password) + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go new file mode 100644 index 000000000..dab6d400c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -0,0 +1,75 @@ +package getter + +import ( + "fmt" + "path/filepath" + "strings" +) + +// SourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +// +func SourceDirSubdir(src string) (string, string) { + + // URL might contains another url in query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme + // as the dir. + var offset int + if idx := strings.Index(src[:stop], "://"); idx > -1 { + offset = idx + 3 + } + + // First see if we even have an explicit subdir + idx := strings.Index(src[offset:stop], "//") + if idx == -1 { + return src, "" + } + + idx += offset + subdir := src[idx+2:] + src = src[:idx] + + // Next, check if we have query parameters and push them onto the + // URL. + if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} + +// SubdirGlob returns the actual subdir with globbing processed. +// +// dst should be a destination directory that is already populated (the +// download is complete) and subDir should be the set subDir. If subDir +// is an empty string, this returns an empty string. 
+// +// The returned path is the full absolute path. +func SubdirGlob(dst, subDir string) (string, error) { + matches, err := filepath.Glob(filepath.Join(dst, subDir)) + if err != nil { + return "", err + } + + if len(matches) == 0 { + return "", fmt.Errorf("subdir %q not found", subDir) + } + + if len(matches) > 1 { + return "", fmt.Errorf("subdir %q matches multiple paths", subDir) + } + + return matches[0], nil +} diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go new file mode 100644 index 000000000..2bc6b9ec3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/storage.go @@ -0,0 +1,13 @@ +package getter + +// Storage is an interface that knows how to lookup downloaded directories +// as well as download and update directories from their sources into the +// proper location. +type Storage interface { + // Dir returns the directory on local disk where the directory source + // can be loaded from. + Dir(string) (string, bool, error) + + // Get will download and optionally update the given directory. + Get(string, string, bool) error +} diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore new file mode 100644 index 000000000..42cc4105f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/.gitignore @@ -0,0 +1 @@ +.idea* \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 000000000..abaf1e45f --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 000000000..9b6845e98 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. 
+ +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. + +### Using `hclog.Fmt()` + +```go +var int totalBandwidth = 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... 
[DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 000000000..7815f5019 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 000000000..3efc54c12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,48 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Default returns a globally held logger. This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. +func Default() Logger { + protect.Do(func() { + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } + }) + + return def +} + +// L is a short alias for Default(). +func L() Logger { + return Default() +} + +// SetDefault changes the logger to be returned by Default()and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. 
+func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 000000000..0d079a654 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 000000000..e03ee77d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 000000000..219656c4c --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,527 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// TimeFormat to use for logging. This is a version of RFC3339 that contains +// contains millisecond precision +const TimeFormat = "2006-01-02T15:04:05.000Z0700" + +// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json +const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json" + +var ( + _levelToBracket = map[Level]string{ + Debug: "[DEBUG]", + Trace: "[TRACE]", + Info: "[INFO] ", + Warn: "[WARN] ", + Error: "[ERROR]", + } +) + +// Make sure that intLogger is a Logger +var _ Logger = &intLogger{} + +// intLogger is an internal logger implementation. Internal in that it is +// defined entirely by this package. +type intLogger struct { + json bool + caller bool + name string + timeFormat string + + // This is a pointer so that it's shared by any derived loggers, since + // those derived loggers share the bufio.Writer as well. + mutex *sync.Mutex + writer *writer + level *int32 + + implied []interface{} +} + +// New returns a configured logger. +func New(opts *LoggerOptions) Logger { + if opts == nil { + opts = &LoggerOptions{} + } + + output := opts.Output + if output == nil { + output = DefaultOutput + } + + level := opts.Level + if level == NoLevel { + level = DefaultLevel + } + + mutex := opts.Mutex + if mutex == nil { + mutex = new(sync.Mutex) + } + + l := &intLogger{ + json: opts.JSONFormat, + caller: opts.IncludeLocation, + name: opts.Name, + timeFormat: TimeFormat, + mutex: mutex, + writer: newWriter(output), + level: new(int32), + } + + if opts.TimeFormat != "" { + l.timeFormat = opts.TimeFormat + } + + atomic.StoreInt32(l.level, int32(level)) + + return l +} + +// Log a message and a set of key/value pairs if the given level is at +// or more severe that the threshold configured in the Logger. 
+func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + if level < Level(atomic.LoadInt32(l.level)) { + return + } + + t := time.Now() + + l.mutex.Lock() + defer l.mutex.Unlock() + + if l.json { + l.logJSON(t, level, msg, args...) + } else { + l.log(t, level, msg, args...) + } + + l.writer.Flush(level) +} + +// Cleanup a path by returning the last 2 segments of the path only. +func trimCallerPath(path string) string { + // lovely borrowed from zap + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + + // Find the last separator. + idx := strings.LastIndexByte(path, '/') + if idx == -1 { + return path + } + + // Find the penultimate separator. + idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + if l.caller { + if _, file, line, ok := runtime.Caller(3); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if l.name != "" { + l.writer.WriteString(l.name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) 
+ default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, level, msg) + args = append(l.implied, args...) + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if l.name != "" { + vals["@module"] = l.name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(Debug, msg, args...) 
+} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.Log(Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.Log(Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. +func (l *intLogger) With(args ...interface{}) Logger { + if len(args)%2 != 0 { + panic("With() call requires paired arguments") + } + + sl := *l + + result := make(map[string]interface{}, len(l.implied)+len(args)) + keys := make([]string, 0, len(l.implied)+len(args)) + + // Read existing args, store map and key for consistent sorting + for i := 0; i < len(l.implied); i += 2 { + key := l.implied[i].(string) + keys = append(keys, key) + result[key] = l.implied[i+1] + } + // Read new args, store map and key for consistent sorting + for i := 0; i < len(args); i += 2 { + key := args[i].(string) + _, exists := result[key] + if !exists { + keys = append(keys, key) + } + result[key] = args[i+1] + } + + // Sort keys to be consistent + sort.Strings(keys) + + sl.implied = make([]interface{}, 0, len(l.implied)+len(args)) + for _, k := range keys { + sl.implied = append(sl.implied, k) + sl.implied = append(sl.implied, result[k]) + } + + return &sl +} + +// Create a new sub-Logger that a name decending from the current name. +// This is used to create a subsystem specific Logger. +func (l *intLogger) Named(name string) Logger { + sl := *l + + if sl.name != "" { + sl.name = sl.name + "." + name + } else { + sl.name = name + } + + return &sl +} + +// Create a new sub-Logger with an explicit name. This ignores the current +// name. This is used to create a standalone logger that doesn't fall +// within the normal hierarchy. +func (l *intLogger) ResetNamed(name string) Logger { + sl := *l + + sl.name = name + + return &sl +} + +// Update the logging level on-the-fly. This will affect all subloggers as +// well. +func (l *intLogger) SetLevel(level Level) { + atomic.StoreInt32(l.level, int32(level)) +} + +// Create a *log.Logger that will send it's data through this Logger. This +// allows packages that expect to be using the standard library log to actually +// use this logger. 
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + if opts == nil { + opts = &StandardLoggerOptions{} + } + + return log.New(l.StandardWriter(opts), "", 0) +} + +func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return &stdlogAdapter{ + log: l, + inferLevels: opts.InferLevels, + forceLevel: opts.ForceLevel, + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go new file mode 100644 index 000000000..080ed7999 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -0,0 +1,176 @@ +package hclog + +import ( + "io" + "log" + "os" + "strings" + "sync" +) + +var ( + //DefaultOutput is used as the default log output. + DefaultOutput io.Writer = os.Stderr + + // DefaultLevel is used as the default log level. + DefaultLevel = Info +) + +// Level represents a log level. +type Level int32 + +const ( + // NoLevel is a special level used to indicate that no level has been + // set and allow for a default to be used. + NoLevel Level = 0 + + // Trace is the most verbose level. Intended to be used for the tracing + // of actions in code, such as function enters/exits, etc. + Trace Level = 1 + + // Debug information for programmer lowlevel analysis. + Debug Level = 2 + + // Info information about steady state operations. + Info Level = 3 + + // Warn information about rare but handled events. + Warn Level = 4 + + // Error information about unrecoverable events. + Error Level = 5 +) + +// Format is a simple convience type for when formatting is required. When +// processing a value of this type, the logger automatically treats the first +// argument as a Printf formatting string and passes the rest as the values +// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}). +type Format []interface{} + +// Fmt returns a Format type. This is a convience function for creating a Format +// type. +func Fmt(str string, args ...interface{}) Format { + return append(Format{str}, args...) +} + +// LevelFromString returns a Level type for the named log level, or "NoLevel" if +// the level string is invalid. This facilitates setting the log level via +// config or environment variable by name in a predictable way. +func LevelFromString(levelStr string) Level { + // We don't care about case. Accept both "INFO" and "info". + levelStr = strings.ToLower(strings.TrimSpace(levelStr)) + switch levelStr { + case "trace": + return Trace + case "debug": + return Debug + case "info": + return Info + case "warn": + return Warn + case "error": + return Error + default: + return NoLevel + } +} + +// Logger describes the interface that must be implemeted by all loggers. +type Logger interface { + // Args are alternating key, val pairs + // keys must be strings + // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at the TRACE level + Trace(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the DEBUG level + Debug(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the INFO level + Info(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the WARN level + Warn(msg string, args ...interface{}) + + // Emit a message and key/value pairs at the ERROR level + Error(msg string, args ...interface{}) + + // Indicate if TRACE logs would be emitted. This and the other Is* guards + // are used to elide expensive logging code based on the current level. 
+ IsTrace() bool + + // Indicate if DEBUG logs would be emitted. This and the other Is* guards + IsDebug() bool + + // Indicate if INFO logs would be emitted. This and the other Is* guards + IsInfo() bool + + // Indicate if WARN logs would be emitted. This and the other Is* guards + IsWarn() bool + + // Indicate if ERROR logs would be emitted. This and the other Is* guards + IsError() bool + + // Creates a sublogger that will always have the given key/value pairs + With(args ...interface{}) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // If the logger already has a name, the new value will be appended to the current + // name. That way, a major subsystem can use this to decorate all it's own logs + // without losing context. + Named(name string) Logger + + // Create a logger that will prepend the name string on the front of all messages. + // This sets the name of the logger to the value directly, unlike Named which honor + // the current name as well. + ResetNamed(name string) Logger + + // Updates the level. This should affect all sub-loggers as well. If an + // implementation cannot update the level on the fly, it should no-op. + SetLevel(level Level) + + // Return a value that conforms to the stdlib log.Logger interface + StandardLogger(opts *StandardLoggerOptions) *log.Logger + + // Return a value that conforms to io.Writer, which can be passed into log.SetOutput() + StandardWriter(opts *StandardLoggerOptions) io.Writer +} + +// StandardLoggerOptions can be used to configure a new standard logger. +type StandardLoggerOptions struct { + // Indicate that some minimal parsing should be done on strings to try + // and detect their level and re-emit them. + // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO], + // [DEBUG] and strip it off before reapplying it. + InferLevels bool + + // ForceLevel is used to force all output from the standard logger to be at + // the specified level. Similar to InferLevels, this will strip any level + // prefix contained in the logged string before applying the forced level. + // If set, this override InferLevels. + ForceLevel Level +} + +// LoggerOptions can be used to configure a new logger. +type LoggerOptions struct { + // Name of the subsystem to prefix logs with + Name string + + // The threshold for the logger. Anything less severe is supressed + Level Level + + // Where to write the logs to. Defaults to os.Stderr if nil + Output io.Writer + + // An optional mutex pointer in case Output is shared + Mutex *sync.Mutex + + // Control if the output should be in JSON. + JSONFormat bool + + // Include file and line information in each log line + IncludeLocation bool + + // The time format to use instead of the default + TimeFormat string +} diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go new file mode 100644 index 000000000..7ad6b351e --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -0,0 +1,52 @@ +package hclog + +import ( + "io" + "io/ioutil" + "log" +) + +// NewNullLogger instantiates a Logger for which all calls +// will succeed without doing anything. +// Useful for testing purposes. 
+func NewNullLogger() Logger { + return &nullLogger{} +} + +type nullLogger struct{} + +func (l *nullLogger) Trace(msg string, args ...interface{}) {} + +func (l *nullLogger) Debug(msg string, args ...interface{}) {} + +func (l *nullLogger) Info(msg string, args ...interface{}) {} + +func (l *nullLogger) Warn(msg string, args ...interface{}) {} + +func (l *nullLogger) Error(msg string, args ...interface{}) {} + +func (l *nullLogger) IsTrace() bool { return false } + +func (l *nullLogger) IsDebug() bool { return false } + +func (l *nullLogger) IsInfo() bool { return false } + +func (l *nullLogger) IsWarn() bool { return false } + +func (l *nullLogger) IsError() bool { return false } + +func (l *nullLogger) With(args ...interface{}) Logger { return l } + +func (l *nullLogger) Named(name string) Logger { return l } + +func (l *nullLogger) ResetNamed(name string) Logger { return l } + +func (l *nullLogger) SetLevel(level Level) {} + +func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger { + return log.New(l.StandardWriter(opts), "", log.LstdFlags) +} + +func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer { + return ioutil.Discard +} diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go new file mode 100644 index 000000000..9b27bd3d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. 
+func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 000000000..044a46960 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,83 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + switch s.forceLevel { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else { + s.log.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is. 
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 000000000..7e8ec729d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer +} + +func newWriter(w io.Writer) *writer { + return &writer{w: w} +} + +func (w *writer) Flush(level Level) (err error) { + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, w.b.Bytes()) + } else { + _, err = w.w.Write(w.b.Bytes()) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. +func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml new file mode 100644 index 000000000..304a83595 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.x + +branches: + only: + - master + +script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. 
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 000000000..b97cd6ed0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 000000000..ead5830f7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,97 @@ +# go-multierror + +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. 
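+
+For a sense of what that default human-readable format looks like, here is a
+small illustrative snippet (not from the upstream README; the output shown is
+roughly what the default `ListFormatFunc` in `format.go`, vendored below,
+produces):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+func main() {
+	var result error
+	result = multierror.Append(result, errors.New("config file not found"))
+	result = multierror.Append(result, errors.New("port already in use"))
+
+	// Printed via the default formatter, this yields roughly:
+	//
+	//   2 errors occurred:
+	//       * config file not found
+	//       * port already in use
+	//
+	fmt.Println(result)
+}
+```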
+ +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 000000000..775b6e753 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,41 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierr.Error, they will be flattened +// one level into err. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + if e != nil { + err.Errors = append(err.Errors, e.Errors...) + } + default: + if e != nil { + err.Errors = append(err.Errors, e) + } + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) 
+ } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 000000000..aab8e9abe --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away! + flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 000000000..47f13c49a --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,27 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + if len(es) == 1 { + return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) + } + + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d errors occurred:\n\t%s\n\n", + len(es), strings.Join(points, "\n\t")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod new file mode 100644 index 000000000..2534331d5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-multierror + +require github.com/hashicorp/errwrap v1.0.0 diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum new file mode 100644 index 000000000..85b1f8ff3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 000000000..89b1422d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,51 @@ +package multierror + +import ( + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors in cases and return them as a single "error". 
+type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. +// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 000000000..5c477abe4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go new file mode 100644 index 000000000..fecb14e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/sort.go @@ -0,0 +1,16 @@ +package multierror + +// Len implements sort.Interface function for length +func (err Error) Len() int { + return len(err.Errors) +} + +// Swap implements sort.Interface function for swapping elements +func (err Error) Swap(i, j int) { + err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] +} + +// Less implements sort.Interface function for determining order +func (err Error) Less(i, j int) bool { + return err.Errors[i].Error() < err.Errors[j].Error() +} diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore new file mode 100644 index 000000000..4befed30a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +.idea diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 000000000..fe305ad59 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,168 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 4 years. While initially +created for [Packer](https://www.packer.io), it is additionally in use by +[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. + +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Cross-language support.** Plugins can be written (and consumed) by +almost every major language. This library supports serving plugins via +[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written +in any language. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. 
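+
+To make the interface-based model above concrete, here is a hedged host-side
+sketch (not from the upstream README) of launching a plugin binary and
+dispensing an interface from it. The `Greeter` interface, the `GreeterPlugin`
+`plugin.Plugin` implementation, the handshake values, and the binary path are
+hypothetical stand-ins, loosely mirroring the `examples/` directory; the real
+configuration surface is `ClientConfig` in `client.go`, vendored below.
+
+```go
+// Hypothetical host-side usage; Greeter and GreeterPlugin are assumed to be
+// defined elsewhere (an interface plus a plugin.Plugin implementation).
+client := plugin.NewClient(&plugin.ClientConfig{
+	HandshakeConfig: plugin.HandshakeConfig{
+		ProtocolVersion:  1,
+		MagicCookieKey:   "BASIC_PLUGIN",
+		MagicCookieValue: "hello",
+	},
+	Plugins: plugin.PluginSet{
+		"greeter": &GreeterPlugin{},
+	},
+	Cmd: exec.Command("./greeter-plugin"),
+})
+defer client.Kill()
+
+// Client() starts the subprocess and negotiates the connection;
+// Dispense() returns the requested interface implementation over RPC.
+rpcClient, err := client.Client()
+if err != nil {
+	log.Fatal(err)
+}
+raw, err := rpcClient.Dispense("greeter")
+if err != nil {
+	log.Fatal(err)
+}
+greeter := raw.(Greeter)
+fmt.Println(greeter.Greet())
+```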
+ +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. If the host system +uses [hclog](https://github.com/hashicorp/go-hclog) then the log data +will be structured. If the plugin also uses hclog, logs from the plugin +will be sent to the host hclog and be structured. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control what `io.Writer` these +streams go to to prevent this from happening. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +**Cryptographically Secure Plugins.** Plugins can be verified with an expected +checksum and RPC communications can be configured to use TLS. The host process +must be properly secured to protect this configuration. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single +connection is made between any plugin and the host process. For net/rpc-based +plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. For gRPC-based plugins, +the HTTP2 protocol handles multiplexing. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Or use any other language to write a gRPC server with a tiny amount of + boilerplate to support go-plugin. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest. + + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. Additionally, go-plugin can communicate with the plugin over + TLS. + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory. + + 1. 
Choose the interface(s) you want to expose for plugins. + + 2. For each interface, implement an implementation of that interface + that communicates over a `net/rpc` connection or over a + [gRPC](http://www.grpc.io) connection or both. You'll have to implement + both a client and server implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with +a number of limitations. Since 2012, our plugin system has stabilized +from tens of millions of users using it, and has many benefits we've come to +value greatly. + +For example, we use this plugin system in +[Vault](https://www.vaultproject.io) where dynamic library loading is +not acceptable for security reasons. That is an extreme +example, but we believe our library system has more upsides than downsides +over dynamic library loading and since we've had it built and tested for years, +we'll continue to use it. + +Shared libraries have one major advantage over our system which is much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 000000000..bc56559c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,1025 @@ +package plugin + +import ( + "bufio" + "context" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" +) + +// If this is 1, then we've called CleanupClients. 
This can be used +// by plugin RPC implementations to change error behavior since you +// can expected network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling Cleanup +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") + + // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match + // the one provided in the SecureConfig. + ErrChecksumsDoNotMatch = errors.New("checksums did not match") + + // ErrSecureNoChecksum is returned when an empty checksum is provided to the + // SecureConfig. + ErrSecureConfigNoChecksum = errors.New("no checksum provided") + + // ErrSecureNoHash is returned when a nil Hash object is provided to the + // SecureConfig. + ErrSecureConfigNoHash = errors.New("no hash implementation provided") + + // ErrSecureConfigAndReattach is returned when both Reattach and + // SecureConfig are set. + ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// cal `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + l sync.Mutex + address net.Addr + process *os.Process + client ClientProtocol + protocol Protocol + logger hclog.Logger + doneCtx context.Context + ctxCancel context.CancelFunc + negotiatedVersion int + + // clientWaitGroup is used to manage the lifecycle of the plugin management + // goroutines. + clientWaitGroup sync.WaitGroup + + // stderrWaitGroup is used to prevent the command's Wait() function from + // being called before we've finished reading from the stderr pipe. + stderrWaitGroup sync.WaitGroup + + // processKilled is used for testing only, to flag when the process was + // forcefully killed. + processKilled bool +} + +// NegotiatedVersion returns the protocol version negotiated with the server. +// This is only valid after Start() is called. +func (c *Client) NegotiatedVersion() int { + return c.negotiatedVersion +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // One of the following must be set, but not both. 
+ // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // SecureConfig is configuration for verifying the integrity of the + // executable. It can not be used with Reattach. + SecureConfig *SecureConfig + + // TLSConfig is used to enable TLS on the RPC client. + TLSConfig *tls.Config + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr. + Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. + SyncStdout io.Writer + SyncStderr io.Writer + + // AllowedProtocols is a list of allowed protocols. If this isn't set, + // then only netrpc is allowed. This is so that older go-plugin systems + // can show friendly errors if they see a plugin with an unknown + // protocol. + // + // By setting this, you can cause an error immediately on plugin start + // if an unsupported protocol is used with a good error message. + // + // If this isn't set at all (nil value), then only net/rpc is accepted. + // This is done for legacy reasons. You must explicitly opt-in to + // new protocols. + AllowedProtocols []Protocol + + // Logger is the logger that the client will used. If none is provided, + // it will default to hclog's default logger. + Logger hclog.Logger + + // AutoMTLS has the client and server automatically negotiate mTLS for + // transport authentication. This ensures that only the original client will + // be allowed to connect to the server, and all other connections will be + // rejected. The client will also refuse to connect to any server that isn't + // the original instance started by the client. + // + // In this mode of operation, the client generates a one-time use tls + // certificate, sends the public x.509 certificate to the new server, and + // the server generates a one-time use tls certificate, and sends the public + // x.509 certificate back to the client. These are used to authenticate all + // rpc connections between the client and server. + // + // Setting AutoMTLS to true implies that the server must support the + // protocol, and correctly negotiate the tls certificates, or a connection + // failure will result. 
+ // + // The client should not set TLSConfig, nor should the server set a + // TLSProvider, because AutoMTLS implies that a new certificate and tls + // configuration will be generated at startup. + // + // You cannot Reattach to a server with this option enabled. + AutoMTLS bool +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Protocol Protocol + Addr net.Addr + Pid int +} + +// SecureConfig is used to configure a client to verify the integrity of an +// executable before running. It does this by verifying the checksum is +// expected. Hash is used to specify the hashing method to use when checksumming +// the file. The configuration is verified by the client by calling the +// SecureConfig.Check() function. +// +// The host process should ensure the checksum was provided by a trusted and +// authoritative source. The binary should be installed in such a way that it +// can not be modified by an unauthorized user between the time of this check +// and the time of execution. +type SecureConfig struct { + Checksum []byte + Hash hash.Hash +} + +// Check takes the filepath to an executable and returns true if the checksum of +// the file matches the checksum provided in the SecureConfig. +func (s *SecureConfig) Check(filePath string) (bool, error) { + if len(s.Checksum) == 0 { + return false, ErrSecureConfigNoChecksum + } + + if s.Hash == nil { + return false, ErrSecureConfigNoHash + } + + file, err := os.Open(filePath) + if err != nil { + return false, err + } + defer file.Close() + + _, err = io.Copy(s.Hash, file) + if err != nil { + return false, err + } + + sum := s.Hash.Sum(nil) + + return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. 
+func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + if config.AllowedProtocols == nil { + config.AllowedProtocols = []Protocol{ProtocolNetRPC} + } + + if config.Logger == nil { + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: hclog.DefaultOutput, + Level: hclog.Trace, + Name: "plugin", + }) + } + + c = &Client{ + config: config, + logger: config.Logger, + } + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns the protocol client for this connection. +// +// Subsequent calls to this will return the same client. +func (c *Client) Client() (ClientProtocol, error) { + _, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + switch c.protocol { + case ProtocolNetRPC: + c.client, err = newRPCClient(c) + + case ProtocolGRPC: + c.client, err = newGRPCClient(c.doneCtx, c) + + default: + return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) + } + + if err != nil { + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. +func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// killed is used in tests to check if a process failed to exit gracefully, and +// needed to be killed. +func (c *Client) killed() bool { + c.l.Lock() + defer c.l.Unlock() + return c.processKilled +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + c.l.Unlock() + + // If there is no process, there is nothing to kill. + if process == nil { + return + } + + defer func() { + // Wait for the all client goroutines to finish. + c.clientWaitGroup.Wait() + + // Make sure there is no reference to the old process after it has been + // killed. + c.l.Lock() + c.process = nil + c.l.Unlock() + }() + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. 
+ c.logger.Warn("error closing client during Kill", "err", err) + } + } else { + c.logger.Error("client", "error", err) + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-c.doneCtx.Done(): + c.logger.Debug("plugin exited") + return + case <-time.After(2 * time.Second): + } + } + + // If graceful exiting failed, just kill it + c.logger.Warn("plugin failed to exit gracefully") + process.Kill() + + c.l.Lock() + c.processKilled = true + c.l.Unlock() +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stack here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + secureSet := c.config.SecureConfig != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + + if secureSet && attachSet { + return nil, ErrSecureConfigAndReattach + } + } + + if c.config.Reattach != nil { + return c.reattach() + } + + if c.config.VersionedPlugins == nil { + c.config.VersionedPlugins = make(map[int]PluginSet) + } + + // handle all plugins as versioned, using the handshake config as the default. + version := int(c.config.ProtocolVersion) + + // Make sure we're not overwriting a real version 0. If ProtocolVersion was + // non-zero, then we have to just assume the user made sure that + // VersionedPlugins doesn't conflict. + if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { + c.config.VersionedPlugins[version] = c.config.Plugins + } + + var versionStrings []string + for v := range c.config.VersionedPlugins { + versionStrings = append(versionStrings, strconv.Itoa(v)) + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), + } + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + + cmdStdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + cmdStderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if c.config.SecureConfig != nil { + if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { + return nil, fmt.Errorf("error verifying checksum: %s", err) + } else if !ok { + return nil, ErrChecksumsDoNotMatch + } + } + + // Setup a temporary certificate for client/server mtls, and send the public + // certificate to the plugin. 
+ if c.config.AutoMTLS { + c.logger.Info("configuring client automatic mTLS") + certPEM, keyPEM, err := generateCert() + if err != nil { + c.logger.Error("failed to generate client certificate", "error", err) + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + c.logger.Error("failed to parse client certificate", "error", err) + return nil, err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) + + c.config.TLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ServerName: "localhost", + } + } + + c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid) + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + // Start goroutine that logs the stderr + c.clientWaitGroup.Add(1) + c.stderrWaitGroup.Add(1) + // logStderr calls Done() + go c.logStderr(cmdStderr) + + c.clientWaitGroup.Add(1) + go func() { + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + defer c.clientWaitGroup.Done() + + // get the cmd info early, since the process information will be removed + // in Kill. + pid := c.process.Pid + path := cmd.Path + + // wait to finish reading from stderr since the stderr pipe reader + // will be closed by the subsequent call to cmd.Wait(). + c.stderrWaitGroup.Wait() + + // Wait for the command to end. + err := cmd.Wait() + + debugMsgArgs := []interface{}{ + "path", path, + "pid", pid, + } + if err != nil { + debugMsgArgs = append(debugMsgArgs, + []interface{}{"error", err.Error()}...) + } + + // Log and make sure to flush the logs write away + c.logger.Debug("plugin process exited", debugMsgArgs...) + os.Stderr.Sync() + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan string) + c.clientWaitGroup.Add(1) + go func() { + defer c.clientWaitGroup.Done() + defer close(linesCh) + + scanner := bufio.NewScanner(cmdStdout) + for scanner.Scan() { + linesCh <- scanner.Text() + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is a pipe. + // The scanner goroutine above will close this, but track it with a wait + // group for completeness. + c.clientWaitGroup.Add(1) + defer func() { + go func() { + defer c.clientWaitGroup.Done() + for range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + c.logger.Debug("waiting for RPC address", "path", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-c.doneCtx.Done(): + err = errors.New("plugin exited before we could connect") + case line := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. 
+ line = strings.TrimSpace(line) + parts := strings.SplitN(line, "|", 6) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. + { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Core version: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Test the API version + version, pluginSet, err := c.checkProtoVersion(parts[1]) + if err != nil { + return addr, err + } + + // set the Plugins value to the compatible set, so the version + // doesn't need to be passed through to the ClientProtocol + // implementation. + c.config.Plugins = pluginSet + c.negotiatedVersion = version + c.logger.Debug("using plugin", "version", version) + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + + // If we have a server type, then record that. We default to net/rpc + // for backwards compatibility. + c.protocol = ProtocolNetRPC + if len(parts) >= 5 { + c.protocol = Protocol(parts[4]) + } + + found := false + for _, p := range c.config.AllowedProtocols { + if p == c.protocol { + found = true + break + } + } + if !found { + err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", + c.protocol, c.config.AllowedProtocols) + return addr, err + } + + // See if we have a TLS certificate from the server. + // Checking if the length is > 50 rules out catching the unused "extra" + // data returned from some older implementations. + if len(parts) >= 6 && len(parts[5]) > 50 { + err := c.loadServerCert(parts[5]) + if err != nil { + return nil, fmt.Errorf("error parsing server cert: %s", err) + } + } + } + + c.address = addr + return +} + +// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the +// server, and load it as the RootCA for the client TLSConfig. +func (c *Client) loadServerCert(cert string) error { + certPool := x509.NewCertPool() + + asn1, err := base64.RawStdEncoding.DecodeString(cert) + if err != nil { + return err + } + + x509Cert, err := x509.ParseCertificate([]byte(asn1)) + if err != nil { + return err + } + + certPool.AddCert(x509Cert) + + c.config.TLSConfig.RootCAs = certPool + return nil +} + +func (c *Client) reattach() (net.Addr, error) { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Create a context for when we kill + c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) + + c.clientWaitGroup.Add(1) + // Goroutine to mark exit status + go func(pid int) { + defer c.clientWaitGroup.Done() + + // ensure the context is cancelled when we're done + defer c.ctxCancel() + + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + c.logger.Debug("reattached plugin process exited") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + c.protocol = c.config.Reattach.Protocol + if c.protocol == "" { + // Default the protocol to net/rpc for backwards compatibility + c.protocol = ProtocolNetRPC + } + + return c.address, nil +} + +// checkProtoVersion returns the negotiated version and PluginSet. +// This returns an error if the server returned an incompatible protocol +// version, or an invalid handshake response. +func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { + serverVersion, err := strconv.Atoi(protoVersion) + if err != nil { + return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) + } + + // record these for the error message + var clientVersions []int + + // all versions, including the legacy ProtocolVersion have been added to + // the versions set + for version, plugins := range c.config.VersionedPlugins { + clientVersions = append(clientVersions, version) + + if serverVersion != version { + continue + } + return version, plugins, nil + } + + return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Protocol: c.protocol, + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +// Protocol returns the protocol of server on the remote end. This will +// start the plugin process if it isn't already started. Errors from +// starting the plugin are suppressed and ProtocolInvalid is returned. It +// is recommended you call Start explicitly before calling Protocol to ensure +// no errors occur. 
+func (c *Client) Protocol() Protocol { + _, err := c.Start() + if err != nil { + return ProtocolInvalid + } + + return c.protocol +} + +func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { + return func(_ string, _ time.Duration) (net.Conn, error) { + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + return conn, nil + } +} + +// dialer is compatible with grpc.WithDialer and creates the connection +// to the plugin. +func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { + conn, err := netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + + // If we have a TLS config we wrap our connection. We only do this + // for net/rpc since gRPC uses its own mechanism for TLS. + if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + return conn, nil +} + +var stdErrBufferSize = 64 * 1024 + +func (c *Client) logStderr(r io.Reader) { + defer c.clientWaitGroup.Done() + defer c.stderrWaitGroup.Done() + l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) + + reader := bufio.NewReaderSize(r, stdErrBufferSize) + // continuation indicates the previous line was a prefix + continuation := false + + for { + line, isPrefix, err := reader.ReadLine() + switch { + case err == io.EOF: + return + case err != nil: + l.Error("reading plugin stderr", "error", err) + return + } + + c.config.Stderr.Write(line) + + // The line was longer than our max token size, so it's likely + // incomplete and won't unmarshal. + if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } + + continuation = isPrefix + continue + } + + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. 
+ l.Debug(string(line)) + } + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 000000000..d22c566ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. +// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 000000000..22a7baa6a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 000000000..f3ddf44e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,17 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 000000000..21b14e998 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 000000000..daf142d17 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. +type streamer interface { + Send(*plugin.ConnInfo) error + Recv() (*plugin.ConnInfo, error) + Close() +} + +// sendErr is used to pass errors back during a send. +type sendErr struct { + i *plugin.ConnInfo + ch chan error +} + +// gRPCBrokerServer is used by the plugin to start a stream and to send +// connection information to/from the plugin. 
Implements GRPCBrokerServer and +// streamer interfaces. +type gRPCBrokerServer struct { + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerServer() *gRPCBrokerServer { + return &gRPCBrokerServer{ + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerServer interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the client. +func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { + doneCh := stream.Context().Done() + defer s.Close() + + // Process send stream + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + // Process receive stream + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the client. +func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the client from the stream to the broker. +func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerServer) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// gRPCBrokerClientImpl is used by the client to start a stream and to send +// connection information to/from the client. Implements GRPCBrokerClient and +// streamer interfaces. +type gRPCBrokerClientImpl struct { + // client is the underlying GRPC client used to make calls to the server. + client plugin.GRPCBrokerClient + + // send is used to send connection info to the gRPC stream. + send chan *sendErr + + // recv is used to receive connection info from the gRPC stream. + recv chan *plugin.ConnInfo + + // quit closes down the stream. + quit chan struct{} + + // o is used to ensure we close the quit channel only once. + o sync.Once +} + +func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { + return &gRPCBrokerClientImpl{ + client: plugin.NewGRPCBrokerClient(conn), + send: make(chan *sendErr), + recv: make(chan *plugin.ConnInfo), + quit: make(chan struct{}), + } +} + +// StartStream implements the GRPCBrokerClient interface and will block until +// the quit channel is closed or the context reports Done. The stream will pass +// connection information to/from the plugin. 
+func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. +// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. 
+func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. 
+ select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 000000000..d0d0d8e20 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,117 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. 
+func (c *GRPCClient) Ping() error { + client := grpc_health_v1.NewHealthClient(c.Conn) + _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ + Service: GRPCServiceName, + }) + + return err +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go new file mode 100644 index 000000000..1a8a8e70e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go @@ -0,0 +1,23 @@ +package plugin + +import ( + "context" + + "github.com/hashicorp/go-plugin/internal/plugin" +) + +// GRPCControllerServer handles shutdown calls to terminate the server when the +// plugin client is closed. +type grpcControllerServer struct { + server *GRPCServer +} + +// Shutdown stops the grpc server. It first will attempt a graceful stop, then a +// full stop on the server. +func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { + resp := &plugin.Empty{} + + // TODO: figure out why GracefulStop doesn't work. + s.server.Stop() + return resp, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go new file mode 100644 index 000000000..d3dbf1ced --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -0,0 +1,142 @@ +package plugin + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +// GRPCServiceName is the name of the service that the health check should +// return as passing. +const GRPCServiceName = "plugin" + +// DefaultGRPCServer can be used with the "GRPCServer" field for Server +// as a default factory method to create a gRPC server with no extra options. +func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(opts...) +} + +// GRPCServer is a ServerType implementation that serves plugins over +// gRPC. This allows plugins to easily be written for other languages. +// +// The GRPCServer outputs a custom configuration as a base64-encoded +// JSON structure represented by the GRPCServerConfig config structure. +type GRPCServer struct { + // Plugins are the list of plugins to serve. + Plugins map[string]Plugin + + // Server is the actual server that will accept connections. This + // will be used for plugin registration as well. + Server func([]grpc.ServerOption) *grpc.Server + + // TLS should be the TLS configuration if available. If this is nil, + // the connection will not have transport security. + TLS *tls.Config + + // DoneCh is the channel that is closed when this server has exited. + DoneCh chan struct{} + + // Stdout/StderrLis are the readers for stdout/stderr that will be copied + // to the stdout/stderr connection that is output. + Stdout io.Reader + Stderr io.Reader + + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + + logger hclog.Logger +} + +// ServerProtocol impl. 
+func (s *GRPCServer) Init() error { + // Create our server + var opts []grpc.ServerOption + if s.TLS != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) + } + s.server = s.Server(opts) + + // Register the health service + healthCheck := health.NewServer() + healthCheck.SetServingStatus( + GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + + // Register the broker service + brokerServer := newGRPCBrokerServer() + plugin.RegisterGRPCBrokerServer(s.server, brokerServer) + s.broker = newGRPCBroker(brokerServer, s.TLS) + go s.broker.Run() + + // Register the controller + controllerServer := &grpcControllerServer{ + server: s, + } + plugin.RegisterGRPCControllerServer(s.server, controllerServer) + + // Register all our plugins onto the gRPC server. + for k, raw := range s.Plugins { + p, ok := raw.(GRPCPlugin) + if !ok { + return fmt.Errorf("%q is not a GRPC-compatible plugin", k) + } + + if err := p.GRPCServer(s.broker, s.server); err != nil { + return fmt.Errorf("error registering %q: %s", k, err) + } + } + + return nil +} + +// Stop calls Stop on the underlying grpc.Server +func (s *GRPCServer) Stop() { + s.server.Stop() +} + +// GracefulStop calls GracefulStop on the underlying grpc.Server +func (s *GRPCServer) GracefulStop() { + s.server.GracefulStop() +} + +// Config is the GRPCServerConfig encoded as JSON then base64. +func (s *GRPCServer) Config() string { + // Create a buffer that will contain our final contents + var buf bytes.Buffer + + // Wrap the base64 encoding with JSON encoding. + if err := json.NewEncoder(&buf).Encode(s.config); err != nil { + // We panic since this shouldn't happen under any scenario. We + // carefully control the structure being encoded here and it should + // always be successful. + panic(err) + } + + return buf.String() +} + +func (s *GRPCServer) Serve(lis net.Listener) { + defer close(s.DoneCh) + err := s.server.Serve(lis) + if err != nil { + s.logger.Error("grpc server", "error", err) + } +} + +// GRPCServerConfig is the extra configuration passed along for consumers +// to facilitate using GRPC plugins. +type GRPCServerConfig struct { + StdoutAddr string `json:"stdout_addr"` + StderrAddr string `json:"stderr_addr"` +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go new file mode 100644 index 000000000..aa2fdc813 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. + +package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go new file mode 100644 index 000000000..b6850aa59 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_broker.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 000000000..3fa79e8ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 000000000..38b420432 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 000000000..345d0a1c1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 000000000..fb2ef930c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. 
+func flattenKVPairs(kvs []*logEntryKV) []interface{} { + var result []interface{} + for _, kv := range kvs { + result = append(result, kv.Key) + result = append(result, kv.Value) + } + + return result +} + +// parseJSON handles parsing JSON output +func parseJSON(input []byte) (*logEntry, error) { + var raw map[string]interface{} + entry := &logEntry{} + + err := json.Unmarshal(input, &raw) + if err != nil { + return nil, err + } + + // Parse hclog-specific objects + if v, ok := raw["@message"]; ok { + entry.Message = v.(string) + delete(raw, "@message") + } + + if v, ok := raw["@level"]; ok { + entry.Level = v.(string) + delete(raw, "@level") + } + + if v, ok := raw["@timestamp"]; ok { + t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) + if err != nil { + return nil, err + } + entry.Timestamp = t + delete(raw, "@timestamp") + } + + // Parse dynamic KV args from the hclog payload. + for k, v := range raw { + entry.KVPairs = append(entry.KVPairs, &logEntryKV{ + Key: k, + Value: v, + }) + } + + return entry, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go new file mode 100644 index 000000000..889552458 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mtls.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +// generateCert generates a temporary certificate for plugin authentication. The +// certificate and private key are returned in PEM format. +func generateCert() (cert []byte, privateKey []byte, err error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + host := "localhost" + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + Organization: []string{"HashiCorp"}, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, err + } + + var certOut bytes.Buffer + if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { + return nil, nil, err + } + + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return nil, nil, err + } + + var keyOut bytes.Buffer + if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return nil, nil, err + } + + cert = certOut.Bytes() + privateKey = keyOut.Bytes() + + return cert, privateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 000000000..01c45ad7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is 
responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. +func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. 
+func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 000000000..79d967463 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,58 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. +package plugin + +import ( + "context" + "errors" + "net/rpc" + + "google.golang.org/grpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. 
+type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 000000000..88c999a58 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 000000000..70ba546bf --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 000000000..9f7b01809 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 000000000..0cfc19e52 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. 
+ Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 000000000..f30a4b1d3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,170 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// newRPCClient creates a new RPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. +func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. 
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 000000000..5bb18dd5d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,197 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. 
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type controlServer struct { + server *RPCServer +} + +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. +func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + +func (c *controlServer) Quit( + null bool, response *struct{}) error { + // End the server + c.server.done() + + // Always return true + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. We wait for a connection for the plugin implementation + // and serve it. 
+ go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 000000000..4c230e3ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,452 @@ +package plugin + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "sort" + "strconv" + "strings" + "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + + // Plugins are the plugins that are served. + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. 
This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. + sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the version to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to os.Stderr. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. 
Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + logger.Error("plugin init error", "error", err) + return + } + + // Close the listener on return. We wrap this in a func() on purpose + // because the "listener" reference may change to TLS. + defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate client certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse client certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. 
+ if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return + } + + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Accept connections and wait for completion + go server.Serve(listener) + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } + } + + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. 
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 000000000..033079ea0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 000000000..1d547aaaa --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 000000000..2cf2c26cc --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,180 @@ +package plugin + +import ( + "bytes" + "context" + "io" + "net" + "net/rpc" + + "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" +) + +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. 
+ l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. 
+func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md new file mode 100644 index 000000000..02ece3317 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/README.md @@ -0,0 +1,10 @@ +# go-safetemp +[![Godoc](https://godoc.org/github.com/hashcorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp) + +Functions for safely working with temporary directories and files. + +## Why? + +The Go standard library provides the excellent `ioutil` package for +working with temporary directories and files. This library builds on top +of that to provide safe abstractions above that. 
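A minimal usage sketch of the `Dir` helper exposed by this vendored go-safetemp package (its definition appears in `safetemp.go` further down in this diff). The `main` wrapper and the `"demo"` prefix are illustrative assumptions, not part of the vendored code: `Dir` hands back a path that does not exist yet, plus an `io.Closer` that removes the temporary parent directory.

```go
// Illustrative only: typical consumer usage of the vendored go-safetemp
// package (see safetemp.go below).
package main

import (
	"fmt"
	"log"

	safetemp "github.com/hashicorp/go-safetemp"
)

func main() {
	// "" means "use the default temp dir"; "demo" is an illustrative prefix.
	workDir, closer, err := safetemp.Dir("", "demo")
	if err != nil {
		log.Fatal(err)
	}
	// Closing removes the parent temp dir and anything created under workDir.
	defer closer.Close()

	// workDir does not exist yet, so it can be passed to calls that
	// require a non-existent directory, e.g. /tmp/demo123456789/temp.
	fmt.Println("safe to create:", workDir)
}
```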
diff --git a/vendor/github.com/hashicorp/go-safetemp/go.mod b/vendor/github.com/hashicorp/go-safetemp/go.mod new file mode 100644 index 000000000..02bc5f5bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-safetemp diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go new file mode 100644 index 000000000..c4ae72b78 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go @@ -0,0 +1,40 @@ +package safetemp + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Dir creates a new temporary directory that isn't yet created. This +// can be used with calls that expect a non-existent directory. +// +// The directory is created as a child of a temporary directory created +// within the directory dir starting with prefix. The temporary directory +// returned is always named "temp". The parent directory has the specified +// prefix. +// +// The returned io.Closer should be used to clean up the returned directory. +// This will properly remove the returned directory and any other temporary +// files created. +// +// If an error is returned, the Closer does not need to be called (and will +// be nil). +func Dir(dir, prefix string) (string, io.Closer, error) { + // Create the temporary directory + td, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", nil, err + } + + return filepath.Join(td, "temp"), pathCloser(td), nil +} + +// pathCloser implements io.Closer to remove the given path on Close. +type pathCloser string + +// Close deletes this path. +func (p pathCloser) Close() error { + return os.RemoveAll(string(p)) +} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml new file mode 100644 index 000000000..769849071 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml @@ -0,0 +1,12 @@ +language: go + +sudo: false + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 000000000..fbde8b9ae --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) + +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 000000000..dd57f9d21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 000000000..911227f61 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,65 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + buf := make([]byte, size) + if _, err := rand.Read(buf); err != nil { + return nil, fmt.Errorf("failed to read random bytes: %v", err) + } + return buf, nil +} + +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + buf, err := GenerateRandomBytes(uuidLen) + if err != nil { + return "", err + } + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) + } + + return fmt.Sprintf("%x-%x-%x-%x-%x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 2 * uuidLen + 4 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != uuidLen { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 000000000..01c5dc219 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.9 + - "1.10" + - 1.11 + - 1.12 + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, 
version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 000000000..6f3a15ce7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. 
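The go-version README above shows plain comparison and constraint checks; `constraint.go` (the next file) also implements the pessimistic operator `~>` via `constraintPessimistic`, which pins the leading segments and lets only the last stated segment grow. A small sketch of that behavior as read from the vendored implementation; the specific constraint and version strings are illustrative:

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Under this implementation, "~> 1.2" behaves like ">= 1.2, < 2.0":
	// 1.2.5 and 1.9.0 satisfy it, 2.0.0 does not.
	constraints, err := version.NewConstraint("~> 1.2")
	if err != nil {
		log.Fatal(err)
	}

	for _, raw := range []string{"1.2.5", "1.9.0", "2.0.0"} {
		v, err := version.NewVersion(raw)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s satisfies %s: %v\n", v, constraints, constraints.Check(v))
	}
}
```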
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 000000000..d05575961 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,204 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. +type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 000000000..f5285555f --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 000000000..1032c5606 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,380 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. 
+const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + si := 0 + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = int64(val) + si++ + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: si, + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqualTo tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqualTo tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. 
+func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 000000000..cc888d43e --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. 
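The simplelru cache above exposes a small surface (NewLRU, Add, Get, Contains, Peek, Remove, Len). A brief usage sketch with an eviction callback, built only from the functions shown in this file and not part of the vendored sources:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires when an entry is pushed out by capacity.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used entry

	if v, ok := cache.Get("b"); ok {
		fmt.Println(v) // 2
	}
	fmt.Println(cache.Len()) // 2
}
```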
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..74c707744 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. 
+ Len() int + + // Clear all cache entries + Purge() +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 000000000..822fa09f5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 000000000..3f83d9023 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.8 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile new file mode 100644 index 000000000..9fafd5017 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/Makefile @@ -0,0 +1,18 @@ +TEST?=./... + +default: test + +fmt: generate + go fmt ./... + +test: generate + go get -t ./... + go test $(TEST) $(TESTARGS) + +generate: + go generate ./... + +updatedeps: + go get -u golang.org/x/tools/cmd/stringer + +.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md new file mode 100644 index 000000000..c8223326d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -0,0 +1,125 @@ +# HCL + +[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) + +HCL (HashiCorp Configuration Language) is a configuration language built +by HashiCorp. 
The goal of HCL is to build a structured configuration language +that is both human and machine friendly for use with command-line tools, but +specifically targeted towards DevOps tools, servers, etc. + +HCL is also fully JSON compatible. That is, JSON can be used as completely +valid input to a system expecting HCL. This helps makes systems +interoperable with other systems. + +HCL is heavily inspired by +[libucl](https://github.com/vstakhov/libucl), +nginx configuration, and others similar. + +## Why? + +A common question when viewing HCL is to ask the question: why not +JSON, YAML, etc.? + +Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) +used a variety of configuration languages from full programming languages +such as Ruby to complete data structure languages such as JSON. What we +learned is that some people wanted human-friendly configuration languages +and some people wanted machine-friendly languages. + +JSON fits a nice balance in this, but is fairly verbose and most +importantly doesn't support comments. With YAML, we found that beginners +had a really hard time determining what the actual structure was, and +ended up guessing more often than not whether to use a hyphen, colon, etc. +in order to represent some configuration key. + +Full programming languages such as Ruby enable complex behavior +a configuration language shouldn't usually allow, and also forces +people to learn some set of Ruby. + +Because of this, we decided to create our own configuration language +that is JSON-compatible. Our configuration language (HCL) is designed +to be written and modified by humans. The API for HCL allows JSON +as an input so that it is also machine-friendly (machines can generate +JSON instead of trying to generate HCL). + +Our goal with HCL is not to alienate other configuration languages. +It is instead to provide HCL as a specialized language for our tools, +and JSON as the interoperability layer. + +## Syntax + +For a complete grammar, please see the parser itself. A high-level overview +of the syntax and grammar is listed here. + + * Single line comments start with `#` or `//` + + * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments + are not allowed. A multi-line comment (also known as a block comment) + terminates at the first `*/` found. + + * Values are assigned with the syntax `key = value` (whitespace doesn't + matter). The value can be any primitive: a string, number, boolean, + object, or list. + + * Strings are double-quoted and can contain any UTF-8 characters. + Example: `"Hello, World"` + + * Multi-line strings start with `<- + echo %Path% + + go version + + go env + + go get -t ./... + +build_script: +- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go new file mode 100644 index 000000000..0b39c1b95 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -0,0 +1,724 @@ +package hcl + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/hcl/token" +) + +// This is the tag to use with structures to have settings for HCL +const tagName = "hcl" + +var ( + // nodeType holds a reference to the type of ast.Node + nodeType reflect.Type = findNodeType() +) + +// Unmarshal accepts a byte slice as input and writes the +// data to the value pointed to by v. 
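The syntax summary in the README above pairs with the decoder entry points that follow (Unmarshal and Decode). A minimal sketch of decoding an HCL snippet into a tagged Go struct; the struct fields and the sample configuration are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Config shows the `hcl` struct tags the decoder looks for.
type Config struct {
	Name    string   `hcl:"name"`
	Port    int      `hcl:"port"`
	Tags    []string `hcl:"tags"`
	Verbose bool     `hcl:"verbose"`
}

func main() {
	input := `
name = "example"
port = 8080
tags = ["a", "b"]
verbose = true
`

	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```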
+func Unmarshal(bs []byte, v interface{}) error { + root, err := parse(bs) + if err != nil { + return err + } + + return DecodeObject(v, root) +} + +// Decode reads the given input and decodes it into the structure +// given by `out`. +func Decode(out interface{}, in string) error { + obj, err := Parse(in) + if err != nil { + return err + } + + return DecodeObject(out, obj) +} + +// DecodeObject is a lower-level version of Decode. It decodes a +// raw Object into the given output. +func DecodeObject(out interface{}, n ast.Node) error { + val := reflect.ValueOf(out) + if val.Kind() != reflect.Ptr { + return errors.New("result must be a pointer") + } + + // If we have the file, we really decode the root node + if f, ok := n.(*ast.File); ok { + n = f.Node + } + + var d decoder + return d.decode("root", n, val.Elem()) +} + +type decoder struct { + stack []reflect.Kind +} + +func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { + k := result + + // If we have an interface with a valid value, we use that + // for the check. + if result.Kind() == reflect.Interface { + elem := result.Elem() + if elem.IsValid() { + k = elem + } + } + + // Push current onto stack unless it is an interface. + if k.Kind() != reflect.Interface { + d.stack = append(d.stack, k.Kind()) + + // Schedule a pop + defer func() { + d.stack = d.stack[:len(d.stack)-1] + }() + } + + switch k.Kind() { + case reflect.Bool: + return d.decodeBool(name, node, result) + case reflect.Float64: + return d.decodeFloat(name, node, result) + case reflect.Int, reflect.Int32, reflect.Int64: + return d.decodeInt(name, node, result) + case reflect.Interface: + // When we see an interface, we make our own thing + return d.decodeInterface(name, node, result) + case reflect.Map: + return d.decodeMap(name, node, result) + case reflect.Ptr: + return d.decodePtr(name, node, result) + case reflect.Slice: + return d.decodeSlice(name, node, result) + case reflect.String: + return d.decodeString(name, node, result) + case reflect.Struct: + return d.decodeStruct(name, node, result) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), + } + } +} + +func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.BOOL { + v, err := strconv.ParseBool(n.Token.Text) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + if n.Token.Type == token.FLOAT { + v, err := strconv.ParseFloat(n.Token.Text, 64) + if err != nil { + return err + } + + result.Set(reflect.ValueOf(v)) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + v, err := strconv.ParseInt(n.Token.Text, 0, 0) + if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + case token.STRING: + v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) + 
if err != nil { + return err + } + + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type %T", name, node), + } +} + +func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { + // When we see an ast.Node, we retain the value to enable deferred decoding. + // Very useful in situations where we want to preserve ast.Node information + // like Pos + if result.Type() == nodeType && result.CanSet() { + result.Set(reflect.ValueOf(node)) + return nil + } + + var set reflect.Value + redecode := true + + // For testing types, ObjectType should just be treated as a list. We + // set this to a temporary var because we want to pass in the real node. + testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. 
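	// For reference, the struct tags handled in this function take forms like
	// the following (illustrative only; field names are invented):
	//
	//	Name    string   `hcl:"name"`           // match a key, optionally renamed
	//	Label   string   `hcl:",key"`           // receives the enclosing block's key
	//	Decoded []string `hcl:",decodedFields"` // receives the names of decoded fields
	//	Embedded         `hcl:",squash"`        // embedded struct flattened into the parent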
+ if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 000000000..575a20b50 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 000000000..6e5ef654b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. 
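As the package documentation above notes, HCL can also be parsed into an AST first and then decoded. A small sketch of that two-step flow using Parse and DecodeObject, decoding into a generic map; the sample input is invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Parse produces an *ast.File, which DecodeObject accepts directly.
	root, err := hcl.Parse(`greeting = "hello"`)
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := hcl.DecodeObject(&out, root); err != nil {
		panic(err)
	}
	fmt.Println(out["greeting"]) // hello
}
```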
+type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. 
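+// Filter, Children and Elem above are meant to be chained. A short sketch
+// against a parsed file (the input and names are illustrative; parser here is
+// hcl/hcl/parser):
+//
+//    src := []byte(`
+//    provider "azurerm"    {}
+//    provider "databricks" {}
+//    `)
+//    file, _ := parser.Parse(src)
+//    list := file.Node.(*ast.ObjectList)
+//
+//    providers := list.Filter("provider")         // "provider" stripped from each key list
+//    fmt.Println(len(providers.Children().Items)) // 2: both items still carry a label key
+//    fmt.Println(len(providers.Elem().Items))     // 0: nothing was assigned directly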
+type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 000000000..ba07ad42b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
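+ // For reference, a read-only WalkFunc from caller code looks like the sketch
+ // below: returning the node unchanged with true keeps descending, returning
+ // false stops walking below that node (file is an illustrative *File):
+ //
+ //    count := 0
+ //    ast.Walk(file.Node, func(n ast.Node) (ast.Node, bool) {
+ //        if _, ok := n.(*ast.LiteralType); ok {
+ //            count++
+ //        }
+ //        return n, true
+ //    })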
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 000000000..5c99381df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 000000000..b4881806e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,520 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
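+ // For context, objectKey above accepts one key followed by '=' or one or
+ // more keys followed by '{', and rejects several keys followed by '='.
+ // Caller-side that works out to (illustrative):
+ //
+ //    if _, err := Parse([]byte(`foo bar = {}`)); err != nil {
+ //        fmt.Println(err) // nested object expected: LBRACE got: ASSIGN
+ //    }
+ //    // whereas `foo = "bar"` and `foo "bar" { a = 1 }` both parse cleanly.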
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 000000000..69662367f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,651 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
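+ // Callers typically drive the scanner in a loop until token.EOF, e.g.
+ // (illustrative):
+ //
+ //    s := New([]byte(`port = 8080`))
+ //    for {
+ //        tok := s.Scan()
+ //        if tok.Type == token.EOF {
+ //            break
+ //        }
+ //        fmt.Printf("%q ", tok.Text) // "port" "=" "8080"
+ //    }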
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // If we see a null character with data left, then that is an error + if ch == '\x00' && s.buf.Len() > 0 { + s.err("unexpected null character (0x00)") + return eof + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = 
string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanHeredoc scans a heredoc string +func (s *Scanner) scanHeredoc() { + // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { + break + } + + // Not an anchor match, record the start of a new line + lineStart = s.srcPos.Offset + } + + if ch == eof { + s.err("heredoc not terminated") + return + } + } + + return +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' && braces == 0 { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n + for n > 0 && digitVal(ch) < base { + ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. + break + } + + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. 
+func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 000000000..5f981eaa2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. 
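+ // Escape sequences are decoded outside ${ } but copied through verbatim
+ // inside it, so for example (illustrative inputs):
+ //
+ //    Unquote(`"a\nb"`)             // -> "a", a real newline, "b"
+ //    Unquote(`"x ${var.a} \"y\""`) // -> x ${var.a} "y"   (braces left intact)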
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return 
+ } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 000000000..e37c0664e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! 
Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 000000000..125a5f072 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
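+// Because of the flattening step invoked from Parse below, JSON input ends up
+// with the same AST shape as native HCL, so a single decoder serves both. A
+// caller-side sketch (illustrative; "jsonparser" stands for this package):
+//
+//    f, err := jsonparser.Parse([]byte(`{"variable": {"region": {"default": "westeurope"}}}`))
+//    if err == nil {
+//        list := f.Node.(*ast.ObjectList)
+//        fmt.Println(len(list.Filter("variable", "region").Items)) // 1: the nested keys were merged
+//    }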
+func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
+func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 000000000..dd5c72bb3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
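+// Compared to the HCL scanner, this one only accepts the bare words true,
+// false and null, and reports multi-digit numbers that start with 0 as an
+// error. A quick sketch (illustrative):
+//
+//    s := New([]byte(`{"count": 3}`))
+//    for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//        fmt.Printf("%q ", tok.Text) // "{" "\"count\"" ":" "3" "}"
+//    }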
+func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } else if lit == "null" { + tok = token.NULL + } else { + s.err("illegal char") + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case ':': + tok = token.COLON + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + s.err("illegal char") + } + default: + s.err("illegal char: " + string(ch)) + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + zero := ch == '0' + pos := s.srcPos + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + + // If we have a larger number and this is zero, error + if zero && pos != s.srcPos { + s.err("numbers cannot start with 0") + } + + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
+func (s *Scanner) scanExponent(ch rune) rune { + if ch == 'e' || ch == 'E' { + ch = s.next() + if ch == '-' || ch == '+' { + ch = s.next() + } + ch = s.scanMantissa(ch) + } + return ch +} + +// scanString scans a quoted string +func (s *Scanner) scanString() { + braces := 0 + for { + // '"' opening already consumed + // read character after quote + ch := s.next() + + if ch == '\n' || ch < 0 || ch == eof { + s.err("literal not terminated") + return + } + + if ch == '"' { + break + } + + // If we're going into a ${} then we can ignore quotes for awhile + if braces == 0 && ch == '$' && s.peek() == '{' { + braces++ + s.next() + } else if braces > 0 && ch == '{' { + braces++ + } + if braces > 0 && ch == '}' { + braces-- + } + + if ch == '\\' { + s.scanEscape() + } + } + + return +} + +// scanEscape scans an escape sequence +func (s *Scanner) scanEscape() rune { + // http://en.cppreference.com/w/cpp/language/escape + ch := s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': + // nothing to do + case '0', '1', '2', '3', '4', '5', '6', '7': + // octal notation + ch = s.scanDigits(ch, 8, 3) + case 'x': + // hexademical notation + ch = s.scanDigits(s.next(), 16, 2) + case 'u': + // universal character name + ch = s.scanDigits(s.next(), 16, 4) + case 'U': + // universal character name + ch = s.scanDigits(s.next(), 16, 8) + default: + s.err("illegal char escape") + } + return ch +} + +// scanDigits scans a rune with the given base for n times. For example an +// octal notation \184 would yield in scanDigits(ch, 8, 3) +func (s *Scanner) scanDigits(ch rune, base, n int) rune { + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.err("illegal char escape") + } + + // we scanned all digits, put the last non digit char back + s.unread() + return ch +} + +// scanIdentifier scans an identifier and returns the literal string +func (s *Scanner) scanIdentifier() string { + offs := s.srcPos.Offset - s.lastCharLen + ch := s.next() + for isLetter(ch) || isDigit(ch) || ch == '-' { + ch = s.next() + } + + if ch != eof { + s.unread() // we got identifier, put back latest char + } + + return string(s.src[offs:s.srcPos.Offset]) +} + +// recentPosition returns the position of the character immediately after the +// character or token returned by the last call to Scan. +func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. 
If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isHexadecimal returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isHexadecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go new file mode 100644 index 000000000..59c1bb72d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. 
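+// Only the byte offset and the line number take part in the comparison; the
+// column is not considered.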
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 000000000..95a0c3eee --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. +func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 000000000..d9993c292 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. 
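+//
+// The decision is based only on the first non-space rune of the input: a
+// leading '{' selects JSON mode, and anything else selects HCL mode.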
+func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 000000000..1fca53c4c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md new file mode 100644 index 000000000..ccb46bbd8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md @@ -0,0 +1,23 @@ +# HCL Changelog + +## v2.0.0 (Oct 2, 2019) + +Initial release of HCL 2, which is a new implementating combining the HCL 1 +language with the HIL expression language to produce a single language +supporting both nested configuration structures and arbitrary expressions. + +HCL 2 has an entirely new Go library API and so is _not_ a drop-in upgrade +relative to HCL 1. It's possible to import both versions of HCL into a single +program using Go's _semantic import versioning_ mechanism: + +``` +import ( + hcl1 "github.com/hashicorp/hcl" + hcl2 "github.com/hashicorp/hcl/v2" +) +``` + +--- + +Prior to v2.0.0 there was not a curated changelog. Consult the git history +from the latest v1.x.x tag for information on the changes to HCL 1. diff --git a/vendor/github.com/hashicorp/hcl/v2/LICENSE b/vendor/github.com/hashicorp/hcl/v2/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md new file mode 100644 index 000000000..d807a4245 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/README.md @@ -0,0 +1,204 @@ +# HCL + +HCL is a toolkit for creating structured configuration languages that are +both human- and machine-friendly, for use with command-line tools. +Although intended to be generally useful, it is primarily targeted +towards devops tools, servers, etc. + +> **NOTE:** This is major version 2 of HCL, whose Go API is incompatible with +> major version 1. Both versions are available for selection in Go Modules +> projects. HCL 2 _cannot_ be imported from Go projects that are not using Go Modules. For more information, see +> [our version selection guide](https://github.com/golang/go/wiki/Version-Selection). + +HCL has both a _native syntax_, intended to be pleasant to read and write for +humans, and a JSON-based variant that is easier for machines to generate +and parse. + +The HCL native syntax is inspired by [libucl](https://github.com/vstakhov/libucl), +[nginx configuration](http://nginx.org/en/docs/beginners_guide.html#conf_structure), +and others. + +It includes an expression syntax that allows basic inline computation and, +with support from the calling application, use of variables and functions +for more dynamic configuration languages. + +HCL provides a set of constructs that can be used by a calling application to +construct a configuration language. The application defines which attribute +names and nested block types are expected, and HCL parses the configuration +file, verifies that it conforms to the expected structure, and returns +high-level objects that the application can use for further processing. + +```go +package main + +import ( + "log" + "github.com/hashicorp/hcl/v2/hclsimple" +) + +type Config struct { + LogLevel string `hcl:"log_level"` +} + +func main() { + var config Config + err := hclsimple.DecodeFile("config.hcl", nil, &config) + if err != nil { + log.Fatalf("Failed to load configuration: %s", err) + } + log.Printf("Configuration is %#v", config) +} +``` + +A lower-level API is available for applications that need more control over +the parsing, decoding, and evaluation of configuration. + +## Why? + +Newcomers to HCL often ask: why not JSON, YAML, etc? + +Whereas JSON and YAML are formats for serializing data structures, HCL is +a syntax and API specifically designed for building structured configuration +formats. 
+ +HCL attempts to strike a compromise between generic serialization formats +such as JSON and configuration formats built around full programming languages +such as Ruby. HCL syntax is designed to be easily read and written by humans, +and allows _declarative_ logic to permit its use in more complex applications. + +HCL is intended as a base syntax for configuration formats built +around key-value pairs and hierarchical blocks whose structure is well-defined +by the calling application, and this definition of the configuration structure +allows for better error messages and more convenient definition within the +calling application. + +It can't be denied that JSON is very convenient as a _lingua franca_ +for interoperability between different pieces of software. Because of this, +HCL defines a common configuration model that can be parsed from either its +native syntax or from a well-defined equivalent JSON structure. This allows +configuration to be provided as a mixture of human-authored configuration +files in the native syntax and machine-generated files in JSON. + +## Information Model and Syntax + +HCL is built around two primary concepts: _attributes_ and _blocks_. In +native syntax, a configuration file for a hypothetical application might look +something like this: + +```hcl +io_mode = "async" + +service "http" "web_proxy" { + listen_addr = "127.0.0.1:8080" + + process "main" { + command = ["/usr/local/bin/awesome-app", "server"] + } + + process "mgmt" { + command = ["/usr/local/bin/awesome-app", "mgmt"] + } +} +``` + +The JSON equivalent of this configuration is the following: + +```json +{ + "io_mode": "async", + "service": { + "http": { + "web_proxy": { + "listen_addr": "127.0.0.1:8080", + "process": { + "main": { + "command": ["/usr/local/bin/awesome-app", "server"] + }, + "mgmt": { + "command": ["/usr/local/bin/awesome-app", "mgmt"] + }, + } + } + } + } +} +``` + +Regardless of which syntax is used, the API within the calling application +is the same. It can either work directly with the low-level attributes and +blocks, for more advanced use-cases, or it can use one of the _decoder_ +packages to declaratively extract into either Go structs or dynamic value +structures. + +Attribute values can be expressions as well as just literal values: + +```hcl +# Arithmetic with literals and application-provided variables +sum = 1 + addend + +# String interpolation and templates +message = "Hello, ${name}!" + +# Application-provided functions +shouty_message = upper(message) +``` + +Although JSON syntax doesn't permit direct use of expressions, the interpolation +syntax allows use of arbitrary expressions within JSON strings: + +```json +{ + "sum": "${1 + addend}", + "message": "Hello, ${name}!", + "shouty_message": "${upper(message)}" +} +``` + +For more information, see the detailed specifications: + +* [Syntax-agnostic Information Model](hcl/spec.md) +* [HCL Native Syntax](hcl/hclsyntax/spec.md) +* [JSON Representation](hcl/json/spec.md) + +## Changes in 2.0 + +Version 2.0 of HCL combines the features of HCL 1.0 with those of the +interpolation language HIL to produce a single configuration language that +supports arbitrary expressions. + +This new version has a completely new parser and Go API, with no direct +migration path. Although the syntax is similar, the implementation takes some +very different approaches to improve on some "rough edges" that existed with +the original implementation and to allow for more robust error handling. 
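+
+As a rough, illustrative sketch only (reusing the `Config` type from the
+`hclsimple` example above, and the `hclparse` and `gohcl` sub-packages
+mentioned in the package documentation; the file name is an assumption), a
+typical parse-then-decode flow with the new lower-level API looks something
+like this:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/hcl/v2/gohcl"
+	"github.com/hashicorp/hcl/v2/hclparse"
+)
+
+type Config struct {
+	LogLevel string `hcl:"log_level"`
+}
+
+func main() {
+	parser := hclparse.NewParser()
+	f, diags := parser.ParseHCLFile("config.hcl")
+	if diags.HasErrors() {
+		log.Fatalf("parse error: %s", diags.Error())
+	}
+
+	var config Config
+	if moreDiags := gohcl.DecodeBody(f.Body, nil, &config); moreDiags.HasErrors() {
+		log.Fatalf("decode error: %s", moreDiags.Error())
+	}
+	log.Printf("Configuration is %#v", config)
+}
+```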
+ +It's possible to import both HCL 1 and HCL 2 into the same program using Go's +_semantic import versioning_ mechanism: + +```go +import ( + hcl1 "github.com/hashicorp/hcl" + hcl2 "github.com/hashicorp/hcl/v2" +) +``` + +## Acknowledgements + +HCL was heavily inspired by [libucl](https://github.com/vstakhov/libucl), +by [Vsevolod Stakhov](https://github.com/vstakhov). + +HCL and HIL originate in [HashiCorp Terraform](https://terraform.io/), +with the original parsers for each written by +[Mitchell Hashimoto](https://github.com/mitchellh). + +The original HCL parser was ported to pure Go (from yacc) by +[Fatih Arslan](https://github.com/fatih). The structure-related portions of +the new native syntax parser build on that work. + +The original HIL parser was ported to pure Go (from yacc) by +[Martin Atkins](https://github.com/apparentlymart). The expression-related +portions of the new native syntax parser build on that work. + +HCL 2, which merged the original HCL and HIL languages into this single new +language, builds on design and prototyping work by +[Martin Atkins](https://github.com/apparentlymart) in +[zcl](https://github.com/zclconf/go-zcl). diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go new file mode 100644 index 000000000..c320961e1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go @@ -0,0 +1,143 @@ +package hcl + +import ( + "fmt" +) + +// DiagnosticSeverity represents the severity of a diagnostic. +type DiagnosticSeverity int + +const ( + // DiagInvalid is the invalid zero value of DiagnosticSeverity + DiagInvalid DiagnosticSeverity = iota + + // DiagError indicates that the problem reported by a diagnostic prevents + // further progress in parsing and/or evaluating the subject. + DiagError + + // DiagWarning indicates that the problem reported by a diagnostic warrants + // user attention but does not prevent further progress. It is most + // commonly used for showing deprecation notices. + DiagWarning +) + +// Diagnostic represents information to be presented to a user about an +// error or anomoly in parsing or evaluating configuration. +type Diagnostic struct { + Severity DiagnosticSeverity + + // Summary and Detail contain the English-language description of the + // problem. Summary is a terse description of the general problem and + // detail is a more elaborate, often-multi-sentence description of + // the probem and what might be done to solve it. + Summary string + Detail string + + // Subject and Context are both source ranges relating to the diagnostic. + // + // Subject is a tight range referring to exactly the construct that + // is problematic, while Context is an optional broader range (which should + // fully contain Subject) that ought to be shown around Subject when + // generating isolated source-code snippets in diagnostic messages. + // If Context is nil, the Subject is also the Context. + // + // Some diagnostics have no source ranges at all. If Context is set then + // Subject should always also be set. + Subject *Range + Context *Range + + // For diagnostics that occur when evaluating an expression, Expression + // may refer to that expression and EvalContext may point to the + // EvalContext that was active when evaluating it. This may allow for the + // inclusion of additional useful information when rendering a diagnostic + // message to the user. 
+ // + // It is not always possible to select a single EvalContext for a + // diagnostic, and so in some cases this field may be nil even when an + // expression causes a problem. + // + // EvalContexts form a tree, so the given EvalContext may refer to a parent + // which in turn refers to another parent, etc. For a full picture of all + // of the active variables and functions the caller must walk up this + // chain, preferring definitions that are "closer" to the expression in + // case of colliding names. + Expression Expression + EvalContext *EvalContext +} + +// Diagnostics is a list of Diagnostic instances. +type Diagnostics []*Diagnostic + +// error implementation, so that diagnostics can be returned via APIs +// that normally deal in vanilla Go errors. +// +// This presents only minimal context about the error, for compatibility +// with usual expectations about how errors will present as strings. +func (d *Diagnostic) Error() string { + return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail) +} + +// error implementation, so that sets of diagnostics can be returned via +// APIs that normally deal in vanilla Go errors. +func (d Diagnostics) Error() string { + count := len(d) + switch { + case count == 0: + return "no diagnostics" + case count == 1: + return d[0].Error() + default: + return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1) + } +} + +// Append appends a new error to a Diagnostics and return the whole Diagnostics. +// +// This is provided as a convenience for returning from a function that +// collects and then returns a set of diagnostics: +// +// return nil, diags.Append(&hcl.Diagnostic{ ... }) +// +// Note that this modifies the array underlying the diagnostics slice, so +// must be used carefully within a single codepath. It is incorrect (and rude) +// to extend a diagnostics created by a different subsystem. +func (d Diagnostics) Append(diag *Diagnostic) Diagnostics { + return append(d, diag) +} + +// Extend concatenates the given Diagnostics with the receiver and returns +// the whole new Diagnostics. +// +// This is similar to Append but accepts multiple diagnostics to add. It has +// all the same caveats and constraints. +func (d Diagnostics) Extend(diags Diagnostics) Diagnostics { + return append(d, diags...) +} + +// HasErrors returns true if the receiver contains any diagnostics of +// severity DiagError. +func (d Diagnostics) HasErrors() bool { + for _, diag := range d { + if diag.Severity == DiagError { + return true + } + } + return false +} + +func (d Diagnostics) Errs() []error { + var errs []error + for _, diag := range d { + if diag.Severity == DiagError { + errs = append(errs, diag) + } + } + + return errs +} + +// A DiagnosticWriter emits diagnostics somehow. +type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go new file mode 100644 index 000000000..0b4a2629b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go @@ -0,0 +1,311 @@ +package hcl + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sort" + + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. 
+// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, highlightCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" + } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. 
+ if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) + + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue + } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + + } + + w.wr.Write([]byte{'\n'}) + } + + if diag.Expression != nil && diag.EvalContext != nil { + // We will attempt to render the values for any variables + // referenced in the given expression as additional context, for + // situations where the same expression is evaluated multiple + // times in different scopes. + expr := diag.Expression + ctx := diag.EvalContext + + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + for _, traversal := range vars { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + continue + } + + traversalStr := w.traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val))) + } + seen[traversalStr] = struct{}{} + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
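+			// Render the collected statements beneath the source snippet as a
+			// single "with x as ..., y as ...." block, one statement per line.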
+ last := len(stmts) - 1 + + for i, stmt := range stmts { + switch i { + case 0: + w.wr.Write([]byte{'w', 'i', 't', 'h', ' '}) + default: + w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '}) + } + w.wr.Write([]byte(stmt)) + switch i { + case last: + w.wr.Write([]byte{'.', '\n', '\n'}) + default: + w.wr.Write([]byte{',', '\n'}) + } + } + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case TraverseRoot: + buf.WriteString(tStep.Name) + case TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(w.valueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +func (w *diagnosticTextWriter) valueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go new file mode 100644 index 000000000..c12833440 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl/v2/doc.go b/vendor/github.com/hashicorp/hcl/v2/doc.go new file mode 100644 index 000000000..0d43fb2c7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/doc.go @@ -0,0 +1,34 @@ +// Package hcl contains the main modelling types and general utility functions +// for HCL. +// +// For a simple entry point into HCL, see the package in the subdirectory +// "hclsimple", which has an opinionated function Decode that can decode HCL +// configurations in either native HCL syntax or JSON syntax into a Go struct +// type: +// +// package main +// +// import ( +// "log" +// "github.com/hashicorp/hcl/v2/hclsimple" +// ) +// +// type Config struct { +// LogLevel string `hcl:"log_level"` +// } +// +// func main() { +// var config Config +// err := hclsimple.DecodeFile("config.hcl", nil, &config) +// if err != nil { +// log.Fatalf("Failed to load configuration: %s", err) +// } +// log.Printf("Configuration is %#v", config) +// } +// +// If your application needs more control over the evaluation of the +// configuration, you can use the functions in the subdirectories hclparse, +// gohcl, hcldec, etc. Splitting the handling of configuration into multiple +// phases allows for advanced patterns such as allowing expressions in one +// part of the configuration to refer to data defined in another part. 
+package hcl diff --git a/vendor/github.com/hashicorp/hcl/v2/eval_context.go b/vendor/github.com/hashicorp/hcl/v2/eval_context.go new file mode 100644 index 000000000..915910ad8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. +func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_call.go b/vendor/github.com/hashicorp/hcl/v2/expr_call.go new file mode 100644 index 000000000..6963fbae3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. +type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_list.go b/vendor/github.com/hashicorp/hcl/v2/expr_list.go new file mode 100644 index 000000000..d05cca0b9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/expr_list.go @@ -0,0 +1,37 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. +// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. 
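+//
+// As an illustrative sketch only (attr is assumed to be an *Attribute whose
+// value must be a static list), a caller might use it as follows:
+//
+//	exprs, diags := ExprList(attr.Expr)
+//	if !diags.HasErrors() {
+//		for _, e := range exprs {
+//			// evaluate or inspect each element expression
+//		}
+//	}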
+func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprList) + return supported + }) + + if exL, supported := physExpr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_map.go b/vendor/github.com/hashicorp/hcl/v2/expr_map.go new file mode 100644 index 000000000..96d1ce4bf --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/expr_map.go @@ -0,0 +1,44 @@ +package hcl + +// ExprMap tests if the given expression is a static map construct and, +// if so, extracts the expressions that represent the map elements. +// If the given expression is not a static map, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprMap that takes no arguments and returns +// []KeyValuePair. This method should return nil if a static map cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) { + type exprMap interface { + ExprMap() []KeyValuePair + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprMap) + return supported + }) + + if exM, supported := physExpr.(exprMap); supported { + if pairs := exM.ExprMap(); pairs != nil { + return pairs, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static map expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// KeyValuePair represents a pair of expressions that serve as a single item +// within a map or object definition construct. +type KeyValuePair struct { + Key Expression + Value Expression +} diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go new file mode 100644 index 000000000..6d5d205c4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go @@ -0,0 +1,68 @@ +package hcl + +type unwrapExpression interface { + UnwrapExpression() Expression +} + +// UnwrapExpression removes any "wrapper" expressions from the given expression, +// to recover the representation of the physical expression given in source +// code. +// +// Sometimes wrapping expressions are used to modify expression behavior, e.g. +// in extensions that need to make some local variables available to certain +// sub-trees of the configuration. This can make it difficult to reliably +// type-assert on the physical AST types used by the underlying syntax. +// +// Unwrapping an expression may modify its behavior by stripping away any +// additional constraints or capabilities being applied to the Value and +// Variables methods, so this function should generally only be used prior +// to operations that concern themselves with the static syntax of the input +// configuration, and not with the effective value of the expression. 
+// +// Wrapper expression types must support unwrapping by implementing a method +// called UnwrapExpression that takes no arguments and returns the embedded +// Expression. Implementations of this method should peel away only one level +// of wrapping, if multiple are present. This method may return nil to +// indicate _dynamically_ that no wrapped expression is available, for +// expression types that might only behave as wrappers in certain cases. +func UnwrapExpression(expr Expression) Expression { + for { + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return expr + } + innerExpr := unwrap.UnwrapExpression() + if innerExpr == nil { + return expr + } + expr = innerExpr + } +} + +// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the +// caller an opportunity to test each level of unwrapping to see each a +// particular expression is accepted. +// +// This could be used, for example, to unwrap until a particular other +// interface is satisfied, regardless of wrap wrapping level it is satisfied +// at. +// +// The given callback function must return false to continue wrapping, or +// true to accept and return the proposed expression given. If the callback +// function rejects even the final, physical expression then the result of +// this function is nil. +func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression { + for { + if until(expr) { + return expr + } + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return nil + } + expr = unwrap.UnwrapExpression() + if expr == nil { + return nil + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md new file mode 100644 index 000000000..f59ce92e9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/README.md @@ -0,0 +1,184 @@ +# HCL Dynamic Blocks Extension + +This HCL extension implements a special block type named "dynamic" that can +be used to dynamically generate blocks of other types by iterating over +collection values. + +Normally the block structure in an HCL configuration file is rigid, even +though dynamic expressions can be used within attribute values. This is +convenient for most applications since it allows the overall structure of +the document to be decoded easily, but in some applications it is desirable +to allow dynamic block generation within certain portions of the configuration. + +Dynamic block generation is performed using the `dynamic` block type: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + dynamic "nested" { + for_each = ["a", "b", "c"] + iterator = nested + content { + foo = "dynamic block ${nested.value}" + } + } + + nested { + foo = "static block 2" + } +} +``` + +The above is interpreted as if it were written as follows: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + nested { + foo = "dynamic block a" + } + + nested { + foo = "dynamic block b" + } + + nested { + foo = "dynamic block c" + } + + nested { + foo = "static block 2" + } +} +``` + +Since HCL block syntax is not normally exposed to the possibility of unknown +values, this extension must make some compromises when asked to iterate over +an unknown collection. 
If the length of the collection cannot be statically +recognized (because it is an unknown value of list, map, or set type) then +the `dynamic` construct will generate a _single_ dynamic block whose iterator +key and value are both unknown values of the dynamic pseudo-type, thus causing +any attribute values derived from iteration to appear as unknown values. There +is no explicit representation of the fact that the length of the collection may +eventually be different than one. + +## Usage + +Pass a body to function `Expand` to obtain a new body that will, on access +to its content, evaluate and expand any nested `dynamic` blocks. +Dynamic block processing is also automatically propagated into any nested +blocks that are returned, allowing users to nest dynamic blocks inside +one another and to nest dynamic blocks inside other static blocks. + +HCL structural decoding does not normally have access to an `EvalContext`, so +any variables and functions that should be available to the `for_each` +and `labels` expressions must be passed in when calling `Expand`. Expressions +within the `content` block are evaluated separately and so can be passed a +separate `EvalContext` if desired, during normal attribute expression +evaluation. + +## Detecting Variables + +Some applications dynamically generate an `EvalContext` by analyzing which +variables are referenced by an expression before evaluating it. + +This unfortunately requires some extra effort when this analysis is required +for the context passed to `Expand`: the HCL API requires a schema to be +provided in order to do any analysis of the blocks in a body, but the low-level +schema model provides a description of only one level of nested blocks at +a time, and thus a new schema must be provided for each additional level of +nesting. + +To make this arduous process as convenient as possible, this package provides +a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode` +instance that can be used to find variables directly in a given body and also +determine which nested blocks require recursive calls. Using this mechanism +requires that the caller be able to look up a schema given a nested block type. +For _simple_ formats where a specific block type name always has the same schema +regardless of context, a walk can be implemented as follows: + +```go +func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal { + vars, children := node.Visit(schema) + + for _, child := range children { + var childSchema *hcl.BodySchema + switch child.BlockTypeName { + case "a": + childSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "b", + LabelNames: []string{"key"}, + }, + }, + } + case "b": + childSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "val", + Required: true, + }, + }, + } + default: + // Should never happen, because the above cases should be exhaustive + // for the application's configuration format. + panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName)) + } + + vars = append(vars, testWalkAndAccumVars(child.Node, childSchema)...) + } +} +``` + +### Detecting Variables with `hcldec` Specifications + +For applications that use the higher-level `hcldec` package to decode nested +configuration structures into `cty` values, the same specification can be used +to automatically drive the recursive variable-detection walk described above. 
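Before the `hcldec`-driven helper described next, a minimal sketch of the basic `Expand` workflow from the Usage section above may help. The body contents, block type name, schema, and variable values here are hypothetical; only `dynblock.Expand` and the ordinary `hcl.Body` methods come from this package.

```go
// Assumed imports: "github.com/hashicorp/hcl/v2",
// "github.com/hashicorp/hcl/v2/ext/dynblock", "github.com/zclconf/go-cty/cty".
func expandAndDecode(body hcl.Body) (*hcl.BodyContent, hcl.Diagnostics) {
	// Variables made available to the `for_each` and `labels` expressions of
	// any "dynamic" blocks; the name "child_objs" is illustrative.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"child_objs": cty.ListVal([]cty.Value{
				cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c"),
			}),
		},
	}

	expanded := dynblock.Expand(body, ctx)

	// The schema names only the real block types; by the time Content runs,
	// the wrapper has turned "dynamic" blocks into ordinary "nested" blocks.
	schema := &hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "nested"}},
	}
	return expanded.Content(schema)
}
```

Note that the Go source vendored in this change names the walk helpers `WalkVariables`/`WalkExpandVariables` and the `hcldec` helpers `VariablesHCLDec`/`ExpandVariablesHCLDec`; the `WalkForEachVariables` and `ForEachVariablesHCLDec` names used in this README appear to be older spellings of the same functions.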
+ +The helper function `ForEachVariablesHCLDec` allows an entire recursive +configuration structure to be analyzed in a single call given a `hcldec.Spec` +that describes the nested block structure. This means a `hcldec`-based +application can support dynamic blocks with only a little additional effort: + +```go +func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { + // Determine which variables are needed to expand dynamic blocks + neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec) + + // Build a suitable EvalContext and expand dynamic blocks + dynCtx := buildEvalContext(neededForDynamic) + dynBody := dynblock.Expand(body, dynCtx) + + // Determine which variables are needed to fully decode the expanded body + // This will analyze expressions that came both from static blocks in the + // original body and from blocks that were dynamically added by Expand. + neededForDecode := hcldec.Variables(dynBody, spec) + + // Build a suitable EvalContext and then fully decode the body as per the + // hcldec specification. + decCtx := buildEvalContext(neededForDecode) + return hcldec.Decode(dynBody, spec, decCtx) +} + +func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { + // (to be implemented by your application) +} +``` + +# Performance + +This extension is going quite harshly against the grain of the HCL API, and +so it uses lots of wrapping objects and temporary data structures to get its +work done. HCL in general is not suitable for use in high-performance situations +or situations sensitive to memory pressure, but that is _especially_ true for +this extension. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go new file mode 100644 index 000000000..65a9eab2d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_body.go @@ -0,0 +1,262 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// expandBody wraps another hcl.Body and expands any "dynamic" blocks found +// inside whenever Content or PartialContent is called. +type expandBody struct { + original hcl.Body + forEachCtx *hcl.EvalContext + iteration *iteration // non-nil if we're nested inside another "dynamic" block + + // These are used with PartialContent to produce a "remaining items" + // body to return. They are nil on all bodies fresh out of the transformer. + // + // Note that this is re-implemented here rather than delegating to the + // existing support required by the underlying body because we need to + // retain access to the entire original body on subsequent decode operations + // so we can retain any "dynamic" blocks for types we didn't take consume + // on the first pass. + hiddenAttrs map[string]struct{} + hiddenBlocks map[string]hcl.BlockHeaderSchema +} + +func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + extSchema := b.extendSchema(schema) + rawContent, diags := b.original.Content(extSchema) + + blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) + diags = append(diags, blockDiags...) 
+ attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + return content, diags +} + +func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + extSchema := b.extendSchema(schema) + rawContent, _, diags := b.original.PartialContent(extSchema) + // We discard the "remain" argument above because we're going to construct + // our own remain that also takes into account remaining "dynamic" blocks. + + blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) + diags = append(diags, blockDiags...) + attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + remain := &expandBody{ + original: b.original, + forEachCtx: b.forEachCtx, + iteration: b.iteration, + hiddenAttrs: make(map[string]struct{}), + hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), + } + for name := range b.hiddenAttrs { + remain.hiddenAttrs[name] = struct{}{} + } + for typeName, blockS := range b.hiddenBlocks { + remain.hiddenBlocks[typeName] = blockS + } + for _, attrS := range schema.Attributes { + remain.hiddenAttrs[attrS.Name] = struct{}{} + } + for _, blockS := range schema.Blocks { + remain.hiddenBlocks[blockS.Type] = blockS + } + + return content, remain, diags +} + +func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + // If we have any hiddenBlocks then we also need to register those here + // so that a call to "Content" on the underlying body won't fail. + // (We'll filter these out again once we process the result of either + // Content or PartialContent.) + for _, blockS := range b.hiddenBlocks { + extSchema.Blocks = append(extSchema.Blocks, blockS) + } + + // If we have any hiddenAttrs then we also need to register these, for + // the same reason as we deal with hiddenBlocks above. + if len(b.hiddenAttrs) != 0 { + newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) + copy(newAttrs, extSchema.Attributes) + for name := range b.hiddenAttrs { + newAttrs = append(newAttrs, hcl.AttributeSchema{ + Name: name, + Required: false, + }) + } + extSchema.Attributes = newAttrs + } + + return extSchema +} + +func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { + if len(b.hiddenAttrs) == 0 && b.iteration == nil { + // Easy path: just pass through the attrs from the original body verbatim + return rawAttrs + } + + // Otherwise we have some work to do: we must filter out any attributes + // that are hidden (since a previous PartialContent call already saw these) + // and wrap the expressions of the inner attributes so that they will + // have access to our iteration variables. 
+ attrs := make(hcl.Attributes, len(rawAttrs)) + for name, rawAttr := range rawAttrs { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + if b.iteration != nil { + attr := *rawAttr // shallow copy so we can mutate it + attr.Expr = exprWrap{ + Expression: attr.Expr, + i: b.iteration, + } + attrs[name] = &attr + } else { + // If we have no active iteration then no wrapping is required. + attrs[name] = rawAttr + } + } + return attrs +} + +func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { + var blocks hcl.Blocks + var diags hcl.Diagnostics + + for _, rawBlock := range rawBlocks { + switch rawBlock.Type { + case "dynamic": + realBlockType := rawBlock.Labels[0] + if _, hidden := b.hiddenBlocks[realBlockType]; hidden { + continue + } + + var blockS *hcl.BlockHeaderSchema + for _, candidate := range schema.Blocks { + if candidate.Type == realBlockType { + blockS = &candidate + break + } + } + if blockS == nil { + // Not a block type that the caller requested. + if !partial { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), + Subject: &rawBlock.LabelRanges[0], + }) + } + continue + } + + spec, specDiags := b.decodeSpec(blockS, rawBlock) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + continue + } + + if spec.forEachVal.IsKnown() { + for it := spec.forEachVal.ElementIterator(); it.Next(); { + key, value := it.Element() + i := b.iteration.MakeChild(spec.iteratorName, key, value) + + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + // Attach our new iteration context so that attributes + // and other nested blocks can refer to our iterator. + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + } else { + // If our top-level iteration value isn't known then we're forced + // to compromise since HCL doesn't have any concept of an + // "unknown block". In this case then, we'll produce a single + // dynamic block with the iterator values set to DynamicVal, + // which at least makes the potential for a block visible + // in our result, even though it's not represented in a fully-accurate + // way. + i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + block.Body = b.expandChild(block.Body, i) + + // We additionally force all of the leaf attribute values + // in the result to be unknown so the calling application + // can, if necessary, use that as a heuristic to detect + // when a single nested block might be standing in for + // multiple blocks yet to be expanded. This retains the + // structure of the generated body but forces all of its + // leaf attribute values to be unknown. + block.Body = unknownBody{block.Body} + + blocks = append(blocks, block) + } + } + + default: + if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { + // A static block doesn't create a new iteration context, but + // it does need to inherit _our own_ iteration context in + // case it contains expressions that refer to our inherited + // iterators, or nested "dynamic" blocks. 
+ expandedBlock := *rawBlock // shallow copy + expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) + blocks = append(blocks, &expandedBlock) + } + } + } + + return blocks, diags +} + +func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { + chiCtx := i.EvalContext(b.forEachCtx) + ret := Expand(child, chiCtx) + ret.(*expandBody).iteration = i + return ret +} + +func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // blocks aren't allowed in JustAttributes mode and this body can + // only produce blocks, so we'll just pass straight through to our + // underlying body here. + return b.original.JustAttributes() +} + +func (b *expandBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go new file mode 100644 index 000000000..98a51eadd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expand_spec.go @@ -0,0 +1,215 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type expandSpec struct { + blockType string + blockTypeRange hcl.Range + defRange hcl.Range + forEachVal cty.Value + iteratorName string + labelExprs []hcl.Expression + contentBody hcl.Body + inherited map[string]*iteration +} + +func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var schema *hcl.BodySchema + if len(blockS.LabelNames) != 0 { + schema = dynamicBlockBodySchemaLabels + } else { + schema = dynamicBlockBodySchemaNoLabels + } + + specContent, specDiags := rawSpec.Body.Content(schema) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + return nil, diags + } + + //// for_each attribute + + eachAttr := specContent.Attributes["for_each"] + eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) + diags = append(diags, eachDiags...) + + if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { + // We skip this error for DynamicPseudoType because that means we either + // have a null (which is checked immediately below) or an unknown + // (which is handled in the expandBody Content methods). + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + if eachVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: "Cannot use a null value in for_each.", + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + + //// iterator attribute + + iteratorName := blockS.Type + if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { + itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) + diags = append(diags, itDiags...) 
+ if itDiags.HasErrors() { + return nil, diags + } + + if len(itTraversal) != 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic iterator name", + Detail: "Dynamic iterator must be a single variable name.", + Subject: itTraversal.SourceRange().Ptr(), + }) + return nil, diags + } + + iteratorName = itTraversal.RootName() + } + + var labelExprs []hcl.Expression + if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { + var labelDiags hcl.Diagnostics + labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) + diags = append(diags, labelDiags...) + if labelDiags.HasErrors() { + return nil, diags + } + + if len(labelExprs) > len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic block label", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), + }) + return nil, diags + } else if len(labelExprs) < len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient dynamic block labels", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelsAttr.Expr.Range().Ptr(), + }) + return nil, diags + } + } + + // Since our schema requests only blocks of type "content", we can assume + // that all entries in specContent.Blocks are content blocks. + if len(specContent.Blocks) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing dynamic content block", + Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", + Subject: &specContent.MissingItemRange, + }) + return nil, diags + } + if len(specContent.Blocks) > 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic content block", + Detail: "Only one nested content block is allowed for each dynamic block.", + Subject: &specContent.Blocks[1].DefRange, + }) + return nil, diags + } + + return &expandSpec{ + blockType: blockS.Type, + blockTypeRange: rawSpec.LabelRanges[0], + defRange: rawSpec.DefRange, + forEachVal: eachVal, + iteratorName: iteratorName, + labelExprs: labelExprs, + contentBody: specContent.Blocks[0].Body, + }, diags +} + +func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + lCtx := i.EvalContext(ctx) + for _, labelExpr := range s.labelExprs { + labelVal, labelDiags := labelExpr.Value(lCtx) + diags = append(diags, labelDiags...) 
+ if labelDiags.HasErrors() { + return nil, diags + } + + var convErr error + labelVal, convErr = convert.Convert(labelVal, cty.String) + if convErr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if labelVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "Cannot use a null value as a dynamic block label.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if !labelVal.IsKnown() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + + labels = append(labels, labelVal.AsString()) + labelRanges = append(labelRanges, labelExpr.Range()) + } + + block := &hcl.Block{ + Type: s.blockType, + TypeRange: s.blockTypeRange, + Labels: labels, + LabelRanges: labelRanges, + DefRange: s.defRange, + Body: s.contentBody, + } + + return block, diags +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go new file mode 100644 index 000000000..460a1d2a3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/expr_wrap.go @@ -0,0 +1,42 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type exprWrap struct { + hcl.Expression + i *iteration +} + +func (e exprWrap) Variables() []hcl.Traversal { + raw := e.Expression.Variables() + ret := make([]hcl.Traversal, 0, len(raw)) + + // Filter out traversals that refer to our iterator name or any + // iterator we've inherited; we're going to provide those in + // our Value wrapper, so the caller doesn't need to know about them. + for _, traversal := range raw { + rootName := traversal.RootName() + if rootName == e.i.IteratorName { + continue + } + if _, inherited := e.i.Inherited[rootName]; inherited { + continue + } + ret = append(ret, traversal) + } + return ret +} + +func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + extCtx := e.i.EvalContext(ctx) + return e.Expression.Value(extCtx) +} + +// UnwrapExpression returns the expression being wrapped by this instance. +// This allows the original expression to be recovered by hcl.UnwrapExpression. 
+func (e exprWrap) UnwrapExpression() hcl.Expression { + return e.Expression +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go new file mode 100644 index 000000000..c56638868 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/iteration.go @@ -0,0 +1,66 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type iteration struct { + IteratorName string + Key cty.Value + Value cty.Value + Inherited map[string]*iteration +} + +func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { + return &iteration{ + IteratorName: s.iteratorName, + Key: key, + Value: value, + Inherited: s.inherited, + } +} + +func (i *iteration) Object() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "key": i.Key, + "value": i.Value, + }) +} + +func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { + new := base.NewChild() + + if i != nil { + new.Variables = map[string]cty.Value{} + for name, otherIt := range i.Inherited { + new.Variables[name] = otherIt.Object() + } + new.Variables[i.IteratorName] = i.Object() + } + + return new +} + +func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { + if i == nil { + // Create entirely new root iteration, then + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + } + } + + inherited := map[string]*iteration{} + for name, otherIt := range i.Inherited { + inherited[name] = otherIt + } + inherited[i.IteratorName] = i + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + Inherited: inherited, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go new file mode 100644 index 000000000..a5bfd94ec --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/public.go @@ -0,0 +1,47 @@ +// Package dynblock provides an extension to HCL that allows dynamic +// declaration of nested blocks in certain contexts via a special block type +// named "dynamic". +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Expand "dynamic" blocks in the given body, returning a new body that +// has those blocks expanded. +// +// The given EvalContext is used when evaluating "for_each" and "labels" +// attributes within dynamic blocks, allowing those expressions access to +// variables and functions beyond the iterator variable created by the +// iteration. +// +// Expand returns no diagnostics because no blocks are actually expanded +// until a call to Content or PartialContent on the returned body, which +// will then expand only the blocks selected by the schema. +// +// "dynamic" blocks are also expanded automatically within nested blocks +// in the given body, including within other dynamic blocks, thus allowing +// multi-dimensional iteration. However, it is not possible to +// dynamically-generate the "dynamic" blocks themselves except through nesting. 
+// +// parent { +// dynamic "child" { +// for_each = child_objs +// content { +// dynamic "grandchild" { +// for_each = child.value.children +// labels = [grandchild.key] +// content { +// parent_key = child.key +// value = grandchild.value +// } +// } +// } +// } +// } +func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body { + return &expandBody{ + original: body, + forEachCtx: ctx, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go new file mode 100644 index 000000000..b3907d6ea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/schema.go @@ -0,0 +1,50 @@ +package dynblock + +import "github.com/hashicorp/hcl/v2" + +var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{ + Type: "dynamic", + LabelNames: []string{"type"}, +} + +var dynamicBlockBodySchemaLabels = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: true, + }, + { + Name: "iterator", + Required: false, + }, + { + Name: "labels", + Required: true, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + LabelNames: nil, + }, + }, +} + +var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: true, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + LabelNames: nil, + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go new file mode 100644 index 000000000..ce98259a5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/unknown_body.go @@ -0,0 +1,84 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// unknownBody is a funny body that just reports everything inside it as +// unknown. It uses a given other body as a sort of template for what attributes +// and blocks are inside -- including source location information -- but +// subsitutes unknown values of unknown type for all attributes. +// +// This rather odd process is used to handle expansion of dynamic blocks whose +// for_each expression is unknown. Since a block cannot itself be unknown, +// we instead arrange for everything _inside_ the block to be unknown instead, +// to give the best possible approximation. +type unknownBody struct { + template hcl.Body +} + +var _ hcl.Body = unknownBody{} + +func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, diags := b.template.Content(schema) + content = b.fixupContent(content) + + // We're intentionally preserving the diagnostics reported from the + // inner body so that we can still report where the template body doesn't + // match the requested schema. + return content, diags +} + +func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + content, remain, diags := b.template.PartialContent(schema) + content = b.fixupContent(content) + remain = unknownBody{remain} // remaining content must also be wrapped + + // We're intentionally preserving the diagnostics reported from the + // inner body so that we can still report where the template body doesn't + // match the requested schema. 
+ return content, remain, diags +} + +func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs, diags := b.template.JustAttributes() + attrs = b.fixupAttrs(attrs) + + // We're intentionally preserving the diagnostics reported from the + // inner body so that we can still report where the template body doesn't + // match the requested schema. + return attrs, diags +} + +func (b unknownBody) MissingItemRange() hcl.Range { + return b.template.MissingItemRange() +} + +func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { + ret := &hcl.BodyContent{} + ret.Attributes = b.fixupAttrs(got.Attributes) + if len(got.Blocks) > 0 { + ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) + for _, gotBlock := range got.Blocks { + new := *gotBlock // shallow copy + new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown + ret.Blocks = append(ret.Blocks, &new) + } + } + + return ret +} + +func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { + if len(got) == 0 { + return nil + } + ret := make(hcl.Attributes, len(got)) + for name, gotAttr := range got { + new := *gotAttr // shallow copy + new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) + ret[name] = &new + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go new file mode 100644 index 000000000..192339295 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables.go @@ -0,0 +1,209 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// WalkVariables begins the recursive process of walking all expressions and +// nested blocks in the given body and its child bodies while taking into +// account any "dynamic" blocks. +// +// This function requires that the caller walk through the nested block +// structure in the given body level-by-level so that an appropriate schema +// can be provided at each level to inform further processing. This workflow +// is thus easiest to use for calling applications that have some higher-level +// schema representation available with which to drive this multi-step +// process. If your application uses the hcldec package, you may be able to +// use VariablesHCLDec instead for a more automatic approach. +func WalkVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + includeContent: true, + } +} + +// WalkExpandVariables is like Variables but it includes only the variables +// required for successful block expansion, ignoring any variables referenced +// inside block contents. The result is the minimal set of all variables +// required for a call to Expand, excluding variables that would only be +// needed to subsequently call Content or PartialContent on the expanded +// body. +func WalkExpandVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + } +} + +type WalkVariablesNode struct { + body hcl.Body + it *iteration + + includeContent bool +} + +type WalkVariablesChild struct { + BlockTypeName string + Node WalkVariablesNode +} + +// Body returns the HCL Body associated with the child node, in case the caller +// wants to do some sort of inspection of it in order to decide what schema +// to pass to Visit. +// +// Most implementations should just fetch a fixed schema based on the +// BlockTypeName field and not access this. 
Deciding on a schema dynamically +// based on the body is a strange thing to do and generally necessary only if +// your caller is already doing other bizarre things with HCL bodies. +func (c WalkVariablesChild) Body() hcl.Body { + return c.Node.body +} + +// Visit returns the variable traversals required for any "dynamic" blocks +// directly in the body associated with this node, and also returns any child +// nodes that must be visited in order to continue the walk. +// +// Each child node has its associated block type name given in its BlockTypeName +// field, which the calling application should use to determine the appropriate +// schema for the content of each child node and pass it to the child node's +// own Visit method to continue the walk recursively. +func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { + extSchema := n.extendSchema(schema) + container, _, _ := n.body.PartialContent(extSchema) + if container == nil { + return vars, children + } + + children = make([]WalkVariablesChild, 0, len(container.Blocks)) + + if n.includeContent { + for _, attr := range container.Attributes { + for _, traversal := range attr.Expr.Variables() { + var ours, inherited bool + if n.it != nil { + ours = traversal.RootName() == n.it.IteratorName + _, inherited = n.it.Inherited[traversal.RootName()] + } + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + } + + for _, block := range container.Blocks { + switch block.Type { + + case "dynamic": + blockTypeName := block.Labels[0] + inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) + if inner == nil { + continue + } + + iteratorName := blockTypeName + if attr, exists := inner.Attributes["iterator"]; exists { + iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) + if len(iterTraversal) == 0 { + // Ignore this invalid dynamic block, since it'll produce + // an error if someone tries to extract content from it + // later anyway. + continue + } + iteratorName = iterTraversal.RootName() + } + blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) + + if attr, exists := inner.Attributes["for_each"]; exists { + // Filter out iterator names inherited from parent blocks + for _, traversal := range attr.Expr.Variables() { + if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { + vars = append(vars, traversal) + } + } + } + if attr, exists := inner.Attributes["labels"]; exists { + // Filter out both our own iterator name _and_ those inherited + // from parent blocks, since we provide _both_ of these to the + // label expressions. + for _, traversal := range attr.Expr.Variables() { + ours := traversal.RootName() == iteratorName + _, inherited := blockIt.Inherited[traversal.RootName()] + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + + for _, contentBlock := range inner.Blocks { + // We only request "content" blocks in our schema, so we know + // any blocks we find here will be content blocks. We require + // exactly one content block for actual expansion, but we'll + // be more liberal here so that callers can still collect + // variables from erroneous "dynamic" blocks. 
+ children = append(children, WalkVariablesChild{ + BlockTypeName: blockTypeName, + Node: WalkVariablesNode{ + body: contentBlock.Body, + it: blockIt, + includeContent: n.includeContent, + }, + }) + } + + default: + children = append(children, WalkVariablesChild{ + BlockTypeName: block.Type, + Node: WalkVariablesNode{ + body: block.Body, + it: n.it, + includeContent: n.includeContent, + }, + }) + + } + } + + return vars, children +} + +func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + return extSchema +} + +// This is a more relaxed schema than what's in schema.go, since we +// want to maximize the amount of variables we can find even if there +// are erroneous blocks. +var variableDetectionInnerSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: false, + }, + { + Name: "labels", + Required: false, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go new file mode 100644 index 000000000..907ef3eba --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/dynblock/variables_hcldec.go @@ -0,0 +1,43 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" +) + +// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec +// specification to automatically drive the recursive walk through nested +// blocks in the given body. +// +// This is a drop-in replacement for hcldec.Variables which is able to treat +// blocks of type "dynamic" in the same special way that dynblock.Expand would, +// exposing both the variables referenced in the "for_each" and "labels" +// arguments and variables used in the nested "content" block. +func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the +// minimal set of variables required to call Expand, ignoring variables that +// are referenced only inside normal block contents. See WalkExpandVariables +// for more information. +func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkExpandVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { + vars, children := node.Visit(hcldec.ImpliedSchema(spec)) + + if len(children) > 0 { + childSpecs := hcldec.ChildBlockTypes(spec) + for _, child := range children { + if childSpec, exists := childSpecs[child.BlockTypeName]; exists { + vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
+ } + } + } + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md new file mode 100644 index 000000000..ec7094702 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/README.md @@ -0,0 +1,67 @@ +# HCL Type Expressions Extension + +This HCL extension defines a convention for describing HCL types using function +call and variable reference syntax, allowing configuration formats to include +type information provided by users. + +The type syntax is processed statically from a hcl.Expression, so it cannot +use any of the usual language operators. This is similar to type expressions +in statically-typed programming languages. + +```hcl +variable "example" { + type = list(string) +} +``` + +The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall` +functions, and so it relies on the underlying syntax to define how "keyword" +and "call" are interpreted. The above shows how they are interpreted in +the HCL native syntax, while the following shows the same information +expressed in JSON: + +```json +{ + "variable": { + "example": { + "type": "list(string)" + } + } +} +``` + +Notice that since we have additional contextual information that we intend +to allow only calls and keywords the JSON syntax is able to parse the given +string directly as an expression, rather than as a template as would be +the case for normal expression evaluation. + +For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl/v2/ext/typeexpr). + +## Type Expression Syntax + +When expressed in the native syntax, the following expressions are permitted +in a type expression: + +* `string` - string +* `bool` - boolean +* `number` - number +* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only) +* `list()` - list of the type given as an argument +* `set()` - set of the type given as an argument +* `map()` - map of the type given as an argument +* `tuple([])` - tuple with the element types given in the single list argument +* `object({=, ...}` - object with the attributes and corresponding types given in the single map argument + +For example: + +* `list(string)` +* `object({name=string,age=number})` +* `map(object({name=string,age=number}))` + +Note that the object constructor syntax is not fully-general for all possible +object types because it requires the attribute names to be valid identifiers. +In practice it is expected that any time an object type is being fixed for +type checking it will be one that has identifiers as its attributes; object +types with weird attributes generally show up only from arbitrary object +constructors in configuration files, which are usually treated either as maps +or as the dynamic pseudo-type. diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go new file mode 100644 index 000000000..c4b379579 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/doc.go @@ -0,0 +1,11 @@ +// Package typeexpr extends HCL with a convention for describing HCL types +// within configuration files. +// +// The type syntax is processed statically from a hcl.Expression, so it cannot +// use any of the usual language operators. This is similar to type expressions +// in statically-typed programming languages. 
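The exported entry points for this extension are `Type`, `TypeConstraint`, and `TypeString` (defined in `public.go` later in this diff). A small, hypothetical sketch of parsing a user-supplied type constraint with the native-syntax parser and then converting a value to the resulting type; the file name and sample values are illustrative, and `hclsyntax.ParseExpression` and `convert.Convert` come from packages this module already depends on:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/ext/typeexpr"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Parse a type expression written in HCL native syntax.
	src := []byte(`list(string)`)
	expr, diags := hclsyntax.ParseExpression(src, "type.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// TypeConstraint (rather than Type) also accepts the "any" keyword.
	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(typeexpr.TypeString(ty)) // list(string)

	// Convert an arbitrary value to the declared type; the tuple's number
	// element is converted to a string along the way.
	got, err := convert.Convert(cty.TupleVal([]cty.Value{
		cty.StringVal("a"), cty.NumberIntVal(1),
	}), ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.GoString())
}
```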
+// +// variable "example" { +// type = list(string) +// } +package typeexpr diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go new file mode 100644 index 000000000..11b068979 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/get_type.go @@ -0,0 +1,196 @@ +package typeexpr + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +const invalidTypeSummary = "Invalid type specification" + +// getType is the internal implementation of both Type and TypeConstraint, +// using the passed flag to distinguish. When constraint is false, the "any" +// keyword will produce an error. +func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { + // First we'll try for one of our keywords + kw := hcl.ExprAsKeyword(expr) + switch kw { + case "bool": + return cty.Bool, nil + case "string": + return cty.String, nil + case "number": + return cty.Number, nil + case "any": + if constraint { + return cty.DynamicPseudoType, nil + } + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), + Subject: expr.Range().Ptr(), + }} + case "list", "map", "set": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), + Subject: expr.Range().Ptr(), + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: expr.Range().Ptr(), + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: expr.Range().Ptr(), + }} + case "": + // okay! we'll fall through and try processing as a call, then. + default: + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), + Subject: expr.Range().Ptr(), + }} + } + + // If we get down here then our expression isn't just a keyword, so we'll + // try to process it as a call instead. + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", + Subject: expr.Range().Ptr(), + }} + } + + switch call.Name { + case "bool", "string", "number", "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} + } + + if len(call.Arguments) != 1 { + contextRange := call.ArgsRange + subjectRange := call.ArgsRange + if len(call.Arguments) > 1 { + // If we have too many arguments (as opposed to too _few_) then + // we'll highlight the extraneous arguments as the diagnostic + // subject. 
+ subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) + } + + switch call.Name { + case "list", "set", "map": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), + Subject: &subjectRange, + Context: &contextRange, + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: &subjectRange, + Context: &contextRange, + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: &subjectRange, + Context: &contextRange, + }} + } + } + + switch call.Name { + + case "list": + ety, diags := getType(call.Arguments[0], constraint) + return cty.List(ety), diags + case "set": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Set(ety), diags + case "map": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Map(ety), diags + case "object": + attrDefs, diags := hcl.ExprMap(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + + atys := make(map[string]cty.Type) + for _, attrDef := range attrDefs { + attrName := hcl.ExprAsKeyword(attrDef.Key) + if attrName == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object constructor map keys must be attribute names.", + Subject: attrDef.Key.Range().Ptr(), + Context: expr.Range().Ptr(), + }) + continue + } + aty, attrDiags := getType(attrDef.Value, constraint) + diags = append(diags, attrDiags...) + atys[attrName] = aty + } + return cty.Object(atys), diags + case "tuple": + elemDefs, diags := hcl.ExprList(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Tuple type constructor requires a list of element types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + etys := make([]cty.Type, len(elemDefs)) + for i, defExpr := range elemDefs { + ety, elemDiags := getType(defExpr, constraint) + diags = append(diags, elemDiags...) + etys[i] = ety + } + return cty.Tuple(etys), diags + default: + // Can't access call.Arguments in this path because we've not validated + // that it contains exactly one expression here. 
+ return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name), + Subject: expr.Range().Ptr(), + }} + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go new file mode 100644 index 000000000..3b8f618fb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ext/typeexpr/public.go @@ -0,0 +1,129 @@ +package typeexpr + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// Type attempts to process the given expression as a type expression and, if +// successful, returns the resulting type. If unsuccessful, error diagnostics +// are returned. +func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { + return getType(expr, false) +} + +// TypeConstraint attempts to parse the given expression as a type constraint +// and, if successful, returns the resulting type. If unsuccessful, error +// diagnostics are returned. +// +// A type constraint has the same structure as a type, but it additionally +// allows the keyword "any" to represent cty.DynamicPseudoType, which is often +// used as a wildcard in type checking and type conversion operations. +func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) { + return getType(expr, true) +} + +// TypeString returns a string rendering of the given type as it would be +// expected to appear in the HCL native syntax. +// +// This is primarily intended for showing types to the user in an application +// that uses typexpr, where the user can be assumed to be familiar with the +// type expression syntax. In applications that do not use typeexpr these +// results may be confusing to the user and so type.FriendlyName may be +// preferable, even though it's less precise. +// +// TypeString produces reasonable results only for types like what would be +// produced by the Type and TypeConstraint functions. In particular, it cannot +// support capsule types. +func TypeString(ty cty.Type) string { + // Easy cases first + switch ty { + case cty.String: + return "string" + case cty.Bool: + return "bool" + case cty.Number: + return "number" + case cty.DynamicPseudoType: + return "any" + } + + if ty.IsCapsuleType() { + panic("TypeString does not support capsule types") + } + + if ty.IsCollectionType() { + ety := ty.ElementType() + etyString := TypeString(ety) + switch { + case ty.IsListType(): + return fmt.Sprintf("list(%s)", etyString) + case ty.IsSetType(): + return fmt.Sprintf("set(%s)", etyString) + case ty.IsMapType(): + return fmt.Sprintf("map(%s)", etyString) + default: + // Should never happen because the above is exhaustive + panic("unsupported collection type") + } + } + + if ty.IsObjectType() { + var buf bytes.Buffer + buf.WriteString("object({") + atys := ty.AttributeTypes() + names := make([]string, 0, len(atys)) + for name := range atys { + names = append(names, name) + } + sort.Strings(names) + first := true + for _, name := range names { + aty := atys[name] + if !first { + buf.WriteByte(',') + } + if !hclsyntax.ValidIdentifier(name) { + // Should never happen for any type produced by this package, + // but we'll do something reasonable here just so we don't + // produce garbage if someone gives us a hand-assembled object + // type that has weird attribute names. 
+ // Using Go-style quoting here isn't perfect, since it doesn't + // exactly match HCL syntax, but it's fine for an edge-case. + buf.WriteString(fmt.Sprintf("%q", name)) + } else { + buf.WriteString(name) + } + buf.WriteByte('=') + buf.WriteString(TypeString(aty)) + first = false + } + buf.WriteString("})") + return buf.String() + } + + if ty.IsTupleType() { + var buf bytes.Buffer + buf.WriteString("tuple([") + etys := ty.TupleElementTypes() + first := true + for _, ety := range etys { + if !first { + buf.WriteByte(',') + } + buf.WriteString(TypeString(ety)) + first = false + } + buf.WriteString("])") + return buf.String() + } + + // Should never happen because we covered all cases above. + panic(fmt.Errorf("unsupported type %#v", ty)) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/go.mod b/vendor/github.com/hashicorp/hcl/v2/go.mod new file mode 100644 index 000000000..c152e6016 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/go.mod @@ -0,0 +1,21 @@ +module github.com/hashicorp/hcl/v2 + +require ( + github.com/agext/levenshtein v1.2.1 + github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 + github.com/apparentlymart/go-textseg v1.0.0 + github.com/davecgh/go-spew v1.1.1 + github.com/go-test/deep v1.0.3 + github.com/google/go-cmp v0.2.0 + github.com/kr/pretty v0.1.0 + github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/sergi/go-diff v1.0.0 + github.com/spf13/pflag v1.0.2 + github.com/stretchr/testify v1.2.2 // indirect + github.com/zclconf/go-cty v1.1.0 + golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 + golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 // indirect + golang.org/x/text v0.3.2 // indirect +) diff --git a/vendor/github.com/hashicorp/hcl/v2/go.sum b/vendor/github.com/hashicorp/hcl/v2/go.sum new file mode 100644 index 000000000..b3b95415f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/go.sum @@ -0,0 +1,51 @@ +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go new file mode 100644 index 000000000..7ba08eee0 --- /dev/null +++ 
b/vendor/github.com/hashicorp/hcl/v2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. +func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + rv := reflect.ValueOf(val) + if rv.Kind() != reflect.Ptr { + panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) + } + + return decodeBodyToValue(body, ctx, rv.Elem()) +} + +func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + et := val.Type() + switch et.Kind() { + case reflect.Struct: + return decodeBodyToStruct(body, ctx, val) + case reflect.Map: + return decodeBodyToMap(body, ctx, val) + default: + panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) + } +} + +func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + schema, partial := ImpliedBodySchema(val.Interface()) + + var content *hcl.BodyContent + var leftovers hcl.Body + var diags hcl.Diagnostics + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + if content == nil { + return diags + } + + tags := getFieldTags(val.Type()) + + if tags.Remain != nil { + fieldIdx := *tags.Remain + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + switch { + case bodyType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(leftovers)) + case attrsType.AssignableTo(field.Type): + attrs, attrsDiags := leftovers.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + fieldV.Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...) + } + } + + for name, fieldIdx := range tags.Attributes { + attr := content.Attributes[name] + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + + if attr == nil { + if !exprType.AssignableTo(field.Type) { + continue + } + + // As a special case, if the target is of type hcl.Expression then + // we'll assign an actual expression that evalues to a cty null, + // so the caller can deal with it within the cty realm rather + // than within the Go realm. 
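+	// (Editorial illustration, not in the upstream source: given a field such as
+	//     Timeout hcl.Expression `hcl:"timeout"`
+	// and a body with no "timeout" attribute, the field receives a synthetic
+	// expression whose Value() is a typeless null, so callers can always call
+	// Value() and then check IsNull() instead of special-casing absence.)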
+ synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange()) + fieldV.Set(reflect.ValueOf(synthExpr)) + continue + } + + switch { + case attrType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr)) + case exprType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr.Expr)) + default: + diags = append(diags, DecodeExpression( + attr.Expr, ctx, fieldV.Addr().Interface(), + )...) + } + } + + blocksByType := content.Blocks.ByType() + + for typeName, fieldIdx := range tags.Blocks { + blocks := blocksByType[typeName] + field := val.Type().Field(fieldIdx) + + ty := field.Type + isSlice := false + isPtr := false + if ty.Kind() == reflect.Slice { + isSlice = true + ty = ty.Elem() + } + if ty.Kind() == reflect.Ptr { + isPtr = true + ty = ty.Elem() + } + + if len(blocks) > 1 && !isSlice { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", typeName), + Detail: fmt.Sprintf( + "Only one %s block is allowed. Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + + for i, block := range blocks { + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + } + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) + mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + var diags hcl.Diagnostics + + ty := v.Type() + + switch { + case blockType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block)) + case bodyType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block.Body)) + case attrsType.AssignableTo(ty): + attrs, attrsDiags := block.Body.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + v.Elem().Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) 
+ + if len(block.Labels) > 0 { + blockTags := getFieldTags(ty) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. +func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + srcVal, diags := expr.Value(ctx) + + convTy, err := gocty.ImpliedType(val) + if err != nil { + panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) + } + + srcVal, err = convert.Convert(srcVal, convTy) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + return diags + } + + err = gocty.FromCtyValue(srcVal, val) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go new file mode 100644 index 000000000..aa3c6ea9e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/doc.go @@ -0,0 +1,53 @@ +// Package gohcl allows decoding HCL configurations into Go data structures. +// +// It provides a convenient and concise way of describing the schema for +// configuration and then accessing the resulting data via native Go +// types. +// +// A struct field tag scheme is used, similar to other decoding and +// unmarshalling libraries. The tags are formatted as in the following example: +// +// ThingType string `hcl:"thing_type,attr"` +// +// Within each tag there are two comma-separated tokens. The first is the +// name of the corresponding construct in configuration, while the second +// is a keyword giving the kind of construct expected. The following +// kind keywords are supported: +// +// attr (the default) indicates that the value is to be populated from an attribute +// block indicates that the value is to populated from a block +// label indicates that the value is to populated from a block label +// remain indicates that the value is to be populated from the remaining body after populating other fields +// +// "attr" fields may either be of type *hcl.Expression, in which case the raw +// expression is assigned, or of any type accepted by gocty, in which case +// gocty will be used to assign the value to a native Go type. 
+// +// "block" fields may be of type *hcl.Block or hcl.Body, in which case the +// corresponding raw value is assigned, or may be a struct that recursively +// uses the same tags. Block fields may also be slices of any of these types, +// in which case multiple blocks of the corresponding type are decoded into +// the slice. +// +// "label" fields are considered only in a struct used as the type of a field +// marked as "block", and are used sequentially to capture the labels of +// the blocks being decoded. In this case, the name token is used only as +// an identifier for the label in diagnostic messages. +// +// "remain" can be placed on a single field that may be either of type +// hcl.Body or hcl.Attributes, in which case any remaining body content is +// placed into this field for delayed processing. If no "remain" field is +// present then any attributes or blocks not matched by another valid tag +// will cause an error diagnostic. +// +// Only a subset of this tagging/typing vocabulary is supported for the +// "Encode" family of functions. See the EncodeIntoBody docs for full details +// on the constraints there. +// +// Broadly-speaking this package deals with two types of error. The first is +// errors in the configuration itself, which are returned as diagnostics +// written with the configuration author as the target audience. The second +// is bugs in the calling program, such as invalid struct tags, which are +// surfaced via panics since there can be no useful runtime handling of such +// errors and they should certainly not be returned to the user as diagnostics. +package gohcl diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go new file mode 100644 index 000000000..d612e09c9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/encode.go @@ -0,0 +1,191 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty/gocty" +) + +// EncodeIntoBody replaces the contents of the given hclwrite Body with +// attributes and blocks derived from the given value, which must be a +// struct value or a pointer to a struct value with the struct tags defined +// in this package. +// +// This function can work only with fully-decoded data. It will ignore any +// fields tagged as "remain", any fields that decode attributes into either +// hcl.Attribute or hcl.Expression values, and any fields that decode blocks +// into hcl.Attributes values. This function does not have enough information +// to complete the decoding of these types. +// +// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock +// to produce a whole hclwrite.Block including block labels. +// +// As long as a suitable value is given to encode and the destination body +// is non-nil, this function will always complete. It will panic in case of +// any errors in the calling program, such as passing an inappropriate type +// or a nil body. +// +// The layout of the resulting HCL source is derived from the ordering of +// the struct fields, with blank lines around nested blocks of different types. +// Fields representing attributes should usually precede those representing +// blocks so that the attributes can group togather in the result. For more +// control, use the hclwrite API directly. 
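Illustrative sketch (editorial aside, not part of the vendored files): the struct-tag vocabulary from the gohcl package comment above, exercised end-to-end with DecodeBody and then EncodeIntoBody (declared just below). The Config and Service types, the example HCL source, and the expected output are invented for illustration; the gohcl, hclparse and hclwrite calls are the package APIs as vendored here.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/v2/gohcl"
        "github.com/hashicorp/hcl/v2/hclparse"
        "github.com/hashicorp/hcl/v2/hclwrite"
    )

    // Service demonstrates "label" and plain attribute fields.
    type Service struct {
        Name     string  `hcl:"name,label"` // first block label
        Port     int     `hcl:"port"`       // required attribute (non-pointer, no ",optional")
        Protocol *string `hcl:"protocol"`   // optional: pointer fields are not required
    }

    // Config demonstrates an "optional" attribute and a repeated "block" field.
    type Config struct {
        LogLevel string    `hcl:"log_level,optional"`
        Services []Service `hcl:"service,block"`
    }

    const exampleSrc = `
    log_level = "info"

    service "web" {
      port = 8080
    }
    `

    func main() {
        f, diags := hclparse.NewParser().ParseHCL([]byte(exampleSrc), "example.hcl")
        if diags.HasErrors() {
            panic(diags)
        }

        var cfg Config
        if diags := gohcl.DecodeBody(f.Body, nil, &cfg); diags.HasErrors() {
            panic(diags)
        }
        fmt.Println(cfg.Services[0].Name, cfg.Services[0].Port) // web 8080

        // The reverse direction: generate HCL source from the same struct.
        out := hclwrite.NewEmptyFile()
        gohcl.EncodeIntoBody(&cfg, out.Body())
        fmt.Printf("%s", out.Bytes())
    }

ImpliedBodySchema (defined in schema.go further down in this diff) derives the *hcl.BodySchema that DecodeBody uses internally from these same tags, which is why Port is treated as required while Protocol is not.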
+func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + populateBody(rv, ty, tags, dst) +} + +// EncodeAsBlock creates a new hclwrite.Block populated with the data from +// the given value, which must be a struct or pointer to struct with the +// struct tags defined in this package. +// +// If the given struct type has fields tagged with "label" tags then they +// will be used in order to annotate the created block with labels. +// +// This function has the same constraints as EncodeIntoBody and will panic +// if they are violated. +func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + labels := make([]string, len(tags.Labels)) + for i, lf := range tags.Labels { + lv := rv.Field(lf.FieldIndex) + // We just stringify whatever we find. It should always be a string + // but if not then we'll still do something reasonable. + labels[i] = fmt.Sprintf("%s", lv.Interface()) + } + + block := hclwrite.NewBlock(blockType, labels) + populateBody(rv, ty, tags, block.Body()) + return block +} + +func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { + nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) + namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) + for n, i := range tags.Attributes { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + for n, i := range tags.Blocks { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + sort.SliceStable(namesOrder, func(i, j int) bool { + ni, nj := namesOrder[i], namesOrder[j] + return nameIdxs[ni] < nameIdxs[nj] + }) + + dst.Clear() + + prevWasBlock := false + for _, name := range namesOrder { + fieldIdx := nameIdxs[name] + field := ty.Field(fieldIdx) + fieldTy := field.Type + fieldVal := rv.Field(fieldIdx) + + if fieldTy.Kind() == reflect.Ptr { + fieldTy = fieldTy.Elem() + fieldVal = fieldVal.Elem() + } + + if _, isAttr := tags.Attributes[name]; isAttr { + + if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { + continue // ignore undecoded fields + } + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + if prevWasBlock { + dst.AppendNewline() + prevWasBlock = false + } + + valTy, err := gocty.ImpliedType(fieldVal.Interface()) + if err != nil { + panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) + } + + val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) + if err != nil { + // This should never happen, since we should always be able + // to decode into the implied type. 
+ panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) + } + + dst.SetAttributeValue(name, val) + + } else { // must be a block, then + elemTy := fieldTy + isSeq := false + if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { + isSeq = true + elemTy = elemTy.Elem() + } + + if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { + continue // ignore undecoded fields + } + prevWasBlock = false + + if isSeq { + l := fieldVal.Len() + for i := 0; i < l; i++ { + elemVal := fieldVal.Index(i) + if !elemVal.IsValid() { + continue // ignore (elem value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(elemVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } else { + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(fieldVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go new file mode 100644 index 000000000..ecf6ddbac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/schema.go @@ -0,0 +1,174 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. +// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + optional := tags.Optional[n] + field := ty.Field(idx) + + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absense can be + // indicated via a null value, so we don't specify that + // the field is required during decoding. 
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go new file mode 100644 index 000000000..a8d00f8ff --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl/v2" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go new file mode 100644 index 000000000..71de45193 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/block_labels.go @@ -0,0 +1,21 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl/v2" +) + +type blockLabel struct { + Value string + Range hcl.Range +} + +func 
labelsForBlock(block *hcl.Block) []blockLabel { + ret := make([]blockLabel, len(block.Labels)) + for i := range block.Labels { + ret[i] = blockLabel{ + Value: block.Labels[i], + Range: block.LabelRanges[i], + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go new file mode 100644 index 000000000..c6e42236d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/decode.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { + schema := ImpliedSchema(spec) + + var content *hcl.BodyContent + var diags hcl.Diagnostics + var leftovers hcl.Body + + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + + val, valDiags := spec.decode(content, blockLabels, ctx) + diags = append(diags, valDiags...) + + return val, leftovers, diags +} + +func impliedType(spec Spec) cty.Type { + return spec.impliedType() +} + +func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + return spec.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go new file mode 100644 index 000000000..23bfe542b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/doc.go @@ -0,0 +1,12 @@ +// Package hcldec provides a higher-level API for unpacking the content of +// HCL bodies, implemented in terms of the low-level "Content" API exposed +// by the bodies themselves. +// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. +package hcldec diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go new file mode 100644 index 000000000..e2027cfd2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. 
+ gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go new file mode 100644 index 000000000..1fa548d0c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/public.go @@ -0,0 +1,81 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. +func SourceRange(body hcl.Body, spec Spec) hcl.Range { + return sourceRange(body, nil, spec) +} + +// ChildBlockTypes returns a map of all of the child block types declared +// by the given spec, with block type names as keys and the associated +// nested body specs as values. +func ChildBlockTypes(spec Spec) map[string]Spec { + ret := map[string]Spec{} + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. 
+ var visit visitFunc + visit = func(s Spec) { + if bs, ok := s.(blockSpec); ok { + for _, blockS := range bs.blockHeaderSchemata() { + nested := bs.nestedSpec() + if nested != nil { // nil can be returned to dynamically opt out of this interface + ret[blockS.Type] = nested + } + } + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go new file mode 100644 index 000000000..ddbe7fa4a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/schema.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl/v2" +) + +// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. +// This is the schema that the Decode function will use internally to +// access the content of a given body. +func ImpliedSchema(spec Spec) *hcl.BodySchema { + var attrs []hcl.AttributeSchema + var blocks []hcl.BlockHeaderSchema + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if as, ok := s.(attrSpec); ok { + attrs = append(attrs, as.attrSchemata()...) + } + + if bs, ok := s.(blockSpec); ok { + blocks = append(blocks, bs.blockHeaderSchemata()...) + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return &hcl.BodySchema{ + Attributes: attrs, + Blocks: blocks, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go new file mode 100644 index 000000000..6f2d9732c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/spec.go @@ -0,0 +1,1567 @@ +package hcldec + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// A Spec is a description of how to decode a hcl.Body to a cty.Value. +// +// The various other types in this package whose names end in "Spec" are +// the spec implementations. The most common top-level spec is ObjectSpec, +// which decodes body content into a cty.Value of an object type. +type Spec interface { + // Perform the decode operation on the given body, in the context of + // the given block (which might be null), using the given eval context. + // + // "block" is provided only by the nested calls performed by the spec + // types that work on block bodies. + decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + + // Return the cty.Type that should be returned when decoding a body with + // this spec. + impliedType() cty.Type + + // Call the given callback once for each of the nested specs that would + // get decoded with the same body and block as the receiver. This should + // not descend into the nested specs used when decoding blocks. + visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. 
+ sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. +type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema + nestedSpec() Spec +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) + } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. 
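Illustrative sketch (editorial aside, not from the vendored file): how these spec types are typically composed. The attribute names and source text are invented; ObjectSpec, AttrSpec (declared just below) and hcldec.Decode are the APIs shown in this diff.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/v2/hcldec"
        "github.com/hashicorp/hcl/v2/hclparse"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // The keys of the ObjectSpec become attributes of the resulting object value.
        spec := hcldec.ObjectSpec{
            "io_mode": &hcldec.AttrSpec{Name: "io_mode", Type: cty.String},
            "port":    &hcldec.AttrSpec{Name: "port", Type: cty.Number, Required: true},
        }

        src := "io_mode = \"async\"\nport = 8080\n"
        f, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")
        if diags.HasErrors() {
            panic(diags)
        }

        // ImpliedType(spec) is cty.Object({"io_mode": cty.String, "port": cty.Number});
        // a missing optional attribute would decode to a null of its declared type.
        val, diags := hcldec.Decode(f.Body, spec, nil)
        if diags.HasErrors() {
            panic(diags)
        }
        fmt.Println(val.GetAttr("io_mode").AsString()) // async
    }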
+type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
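Another editorial sketch (invented names, hedged): AttrSpec and ExprSpec (declared just below) evaluate expressions against the EvalContext passed to Decode, and hcldec.Variables, defined elsewhere in this package and called by the block specs in this file, lets an application discover which variables a body needs before building that context.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/v2"
        "github.com/hashicorp/hcl/v2/hcldec"
        "github.com/hashicorp/hcl/v2/hclparse"
        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        spec := &hcldec.AttrSpec{Name: "greeting", Type: cty.String, Required: true}

        f, diags := hclparse.NewParser().ParseHCL([]byte(`greeting = "hello, ${name}"`), "vars.hcl")
        if diags.HasErrors() {
            panic(diags)
        }

        // Ask which variables the body refers to, so the application can decide
        // what to place in scope. Here this prints "name".
        for _, traversal := range hcldec.Variables(f.Body, spec) {
            fmt.Println(traversal.RootName())
        }

        ctx := &hcl.EvalContext{
            Variables: map[string]cty.Value{
                "name": cty.StringVal("world"),
            },
        }

        // With a nil ctx this reference would instead produce an error diagnostic.
        val, diags := hcldec.Decode(f.Body, spec, ctx)
        if diags.HasErrors() {
            panic(diags)
        }
        fmt.Println(val.AsString()) // hello, world
    }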
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ return val, diags +} + +func (s *BlockSpec) impliedType() cty.Type { + return s.Nested.impliedType() +} + +func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockListSpec is a Spec that produces a cty list of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockListSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockListSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.ListValEmpty(s.Nested.impliedType()) + } else { + // Since our target is a list, all of the decoded elements must have the + // same type or cty.ListVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertable to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message. 
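+			// (Editorial illustration, not in the upstream source: unification fails
+			// here when, for example, the nested spec contains an AttrSpec of type
+			// cty.DynamicPseudoType and one block assigns that attribute a string
+			// while another assigns it an object; the per-block result types then
+			// cannot be unified into a single list element type.)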
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.ListVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + + ret = cty.ListVal(elems) + } + + return ret, diags +} + +func (s *BlockListSpec) impliedType() cty.Type { + return cty.List(s.Nested.impliedType()) +} + +func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockTupleSpec is a Spec that produces a cty tuple of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// This is similar to BlockListSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockTupleSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockTupleSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.EmptyTupleVal + } else { + ret = cty.TupleVal(elems) + } + + return ret, diags +} + +func (s *BlockTupleSpec) impliedType() cty.Type { + // We can't predict our type, because we don't know how many blocks + // there will be until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockSetSpec is a Spec that produces a cty set of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockSetSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSetSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockSetSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.SetValEmpty(s.Nested.impliedType()) + } else { + // Since our target is a set, all of the decoded elements must have the + // same type or cty.SetVal will panic below. Different types can arise + // if there is an attribute spec of type cty.DynamicPseudoType in the + // nested spec; all given values must be convertable to a single type + // in order for the result to be considered valid. + etys := make([]cty.Type, len(elems)) + for i, v := range elems { + etys[i] = v.Type() + } + ety, convs := convert.UnifyUnsafe(etys) + if ety == cty.NilType { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: "Corresponding attributes in all blocks of this type must be the same.", + Subject: &sourceRanges[0], + }) + return cty.DynamicVal, diags + } + for i, v := range elems { + if convs[i] != nil { + newV, err := convs[i](v) + if err != nil { + // FIXME: This is a pretty terrible error message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName), + Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err), + Subject: &sourceRanges[i], + }) + // Bail early here so we won't panic below in cty.ListVal + return cty.DynamicVal, diags + } + elems[i] = newV + } + } + + ret = cty.SetVal(elems) + } + + return ret, diags +} + +func (s *BlockSetSpec) impliedType() cty.Type { + return cty.Set(s.Nested.impliedType()) +} + +func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockMapSpec is a Spec that produces a cty map of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of map structure is created for each of the given label names. +// There must be at least one given label name. 
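Editorial sketch, not part of the vendored file (the block type, labels and addresses are invented): with BlockMapSpec (declared just below), two label names produce two levels of map nesting in the decoded result.

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl/v2/hcldec"
        "github.com/hashicorp/hcl/v2/hclparse"
        "github.com/zclconf/go-cty/cty"
    )

    const servicesSrc = `
    service "http" "web" {
      listen_addr = "127.0.0.1:8080"
    }

    service "http" "api" {
      listen_addr = "127.0.0.1:8081"
    }
    `

    func main() {
        // Two label names -> two nested map levels keyed by the block labels.
        spec := &hcldec.BlockMapSpec{
            TypeName:   "service",
            LabelNames: []string{"protocol", "name"},
            Nested: hcldec.ObjectSpec{
                "listen_addr": &hcldec.AttrSpec{Name: "listen_addr", Type: cty.String, Required: true},
            },
        }

        f, diags := hclparse.NewParser().ParseHCL([]byte(servicesSrc), "services.hcl")
        if diags.HasErrors() {
            panic(diags)
        }

        val, diags := hcldec.Decode(f.Body, spec, nil)
        if diags.HasErrors() {
            panic(diags)
        }

        // val has type map(map(object({listen_addr = string}))).
        web := val.Index(cty.StringVal("http")).Index(cty.StringVal("web"))
        fmt.Println(web.GetAttr("listen_addr").AsString()) // 127.0.0.1:8080
    }

BlockObjectSpec, documented later in this file, has the same shape but produces an object of objects, which is why it can tolerate nested blocks whose results have differing types.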
+type BlockMapSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockMapSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockMapSpec with no Nested Spec") + } + if ImpliedType(s).HasDynamicTypes() { + panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(s.Nested.impliedType()), diags + } + + var ctyMap func(map[string]interface{}, int) cty.Value + ctyMap = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyMap(v.(map[string]interface{}), depth-1) + } + } + return cty.MapVal(vals) + } + + return ctyMap(elems, len(s.LabelNames)), diags +} + +func (s *BlockMapSpec) impliedType() cty.Type { + ret := s.Nested.impliedType() + for _ = range s.LabelNames { + ret = cty.Map(ret) + } + return ret +} + +func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. 
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockObjectSpec is a Spec that produces a cty object of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of object structure is created for each of the given label names. +// There must be at least one given label name. +// +// This is similar to BlockMapSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockObjectSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockObjectSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockObjectSpec with no Nested Spec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. 
The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.EmptyObjectVal, diags + } + + var ctyObj func(map[string]interface{}, int) cty.Value + ctyObj = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyObj(v.(map[string]interface{}), depth-1) + } + } + return cty.ObjectVal(vals) + } + + return ctyObj(elems, len(s.LabelNames)), diags +} + +func (s *BlockObjectSpec) impliedType() cty.Type { + // We can't predict our type, since we don't know how many blocks are + // present and what labels they have until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockAttrsSpec is a Spec that interprets a single block as if it were +// a map of some element type. That is, each attribute within the block +// becomes a key in the resulting map and the attribute's value becomes the +// element value, after conversion to the given element type. The resulting +// value is a cty.Map of the given element type. +// +// This spec imposes a validation constraint that there be exactly one block +// of the given type name and that this block may contain only attributes. The +// block does not accept any labels. +// +// This is an alternative to an AttrSpec of a map type for situations where +// block syntax is desired. Note that block syntax does not permit dynamic +// keys, construction of the result via a "for" expression, etc. In most cases +// an AttrSpec is preferred if the desired result is a map whose keys are +// chosen by the user rather than by schema. +type BlockAttrsSpec struct { + TypeName string + ElementType cty.Type + Required bool +} + +func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// blockSpec implementation +func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: nil, + }, + } +} + +// blockSpec implementation +func (s *BlockAttrsSpec) nestedSpec() Spec { + // This is an odd case: we aren't actually going to apply a nested spec + // in this case, since we're going to interpret the body directly as + // attributes, but we need to return something non-nil so that the + // decoder will recognize this as a block spec. We won't actually be + // using this for anything at decode time. + return noopSpec{} +} + +// specNeedingVariables implementation +func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + + block, _ := s.findBlock(content) + if block == nil { + return nil + } + + var vars []hcl.Traversal + + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil + } + + for _, attr := range attrs { + vars = append(vars, attr.Expr.Variables()...) 
+ } + + // We'll return the variables references in source order so that any + // error messages that result are also in source order. + sort.Slice(vars, func(i, j int) bool { + return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte + }) + + return vars +} + +func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + block, other := s.findBlock(content) + if block == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(cty.Map(s.ElementType)), diags + } + if other != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, block.DefRange.String(), + ), + Subject: &other.DefRange, + }) + } + + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + if len(attrs) == 0 { + return cty.MapValEmpty(s.ElementType), diags + } + + vals := make(map[string]cty.Value, len(attrs)) + for name, attr := range attrs { + attrVal, attrDiags := attr.Expr.Value(ctx) + diags = append(diags, attrDiags...) + + attrVal, err := convert.Convert(attrVal, s.ElementType) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute value", + Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err), + Subject: attr.Expr.Range().Ptr(), + }) + attrVal = cty.UnknownVal(s.ElementType) + } + + vals[name] = attrVal + } + + return cty.MapVal(vals), diags +} + +func (s *BlockAttrsSpec) impliedType() cty.Type { + return cty.Map(s.ElementType) +} + +func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + block, _ := s.findBlock(content) + if block == nil { + return content.MissingItemRange + } + return block.DefRange +} + +func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) { + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + if block != nil { + return block, candidate + } + block = candidate + } + + return block, nil +} + +// A BlockLabelSpec is a Spec that returns a cty.String representing the +// label of the block its given body belongs to, if indeed its given body +// belongs to a block. It is a programming error to use this in a non-block +// context, so this spec will panic in that case. +// +// This spec only works in the nested spec within a BlockSpec, BlockListSpec, +// BlockSetSpec or BlockMapSpec. +// +// The full set of label specs used against a particular block must have a +// consecutive set of indices starting at zero. The maximum index found +// defines how many labels the corresponding blocks must have in cty source. 
+type BlockLabelSpec struct { + Index int + Name string +} + +func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if s.Index >= len(blockLabels) { + panic("BlockListSpec used in non-block context") + } + + return cty.StringVal(blockLabels[s.Index].Value), nil +} + +func (s *BlockLabelSpec) impliedType() cty.Type { + return cty.String // labels are always strings +} + +func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + if s.Index >= len(blockLabels) { + panic("BlockListSpec used in non-block context") + } + + return blockLabels[s.Index].Range +} + +func findLabelSpecs(spec Spec) []string { + maxIdx := -1 + var names map[int]string + + var visit visitFunc + visit = func(s Spec) { + if ls, ok := s.(*BlockLabelSpec); ok { + if maxIdx < ls.Index { + maxIdx = ls.Index + } + if names == nil { + names = make(map[int]string) + } + names[ls.Index] = ls.Name + } + s.visitSameBodyChildren(visit) + } + + visit(spec) + + if maxIdx < 0 { + return nil // no labels at all + } + + ret := make([]string, maxIdx+1) + for i := range ret { + name := names[i] + if name == "" { + // Should never happen if the spec is conformant, since we require + // consecutive indices starting at zero. + name = fmt.Sprintf("missing%02d", i) + } + ret[i] = name + } + + return ret +} + +// DefaultSpec is a spec that wraps two specs, evaluating the primary first +// and then evaluating the default if the primary returns a null value. +// +// The two specifications must have the same implied result type for correct +// operation. If not, the result is undefined. +// +// Any requirements imposed by the "Default" spec apply even if "Primary" does +// not return null. For example, if the "Default" spec is for a required +// attribute then that attribute is always required, regardless of the result +// of the "Primary" spec. +// +// The "Default" spec must not describe a nested block, since otherwise the +// result of ChildBlockTypes would not be decidable without evaluation. If +// the default spec _does_ describe a nested block then the result is +// undefined. +type DefaultSpec struct { + Primary Spec + Default Spec +} + +func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Primary) + cb(s.Default) +} + +func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, diags := s.Primary.decode(content, blockLabels, ctx) + if val.IsNull() { + var moreDiags hcl.Diagnostics + val, moreDiags = s.Default.decode(content, blockLabels, ctx) + diags = append(diags, moreDiags...) + } + return val, diags +} + +func (s *DefaultSpec) impliedType() cty.Type { + return s.Primary.impliedType() +} + +// attrSpec implementation +func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema { + // We must pass through the union of both of our nested specs so that + // we'll have both values available in the result. + var ret []hcl.AttributeSchema + if as, ok := s.Primary.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...) + } + if as, ok := s.Default.(attrSpec); ok { + ret = append(ret, as.attrSchemata()...) 
+ } + return ret +} + +// blockSpec implementation +func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + // Only the primary spec may describe a block, since otherwise + // our nestedSpec method below can't know which to return. + if bs, ok := s.Primary.(blockSpec); ok { + return bs.blockHeaderSchemata() + } + return nil +} + +// blockSpec implementation +func (s *DefaultSpec) nestedSpec() Spec { + if bs, ok := s.Primary.(blockSpec); ok { + return bs.nestedSpec() + } + return nil +} + +func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We can't tell from here which of the two specs will ultimately be used + // in our result, so we'll just assume the first. This is usually the right + // choice because the default is often a literal spec that doesn't have a + // reasonable source range to return anyway. + return s.Primary.sourceRange(content, blockLabels) +} + +// TransformExprSpec is a spec that wraps another and then evaluates a given +// hcl.Expression on the result. +// +// The implied type of this spec is determined by evaluating the expression +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +type TransformExprSpec struct { + Wrapped Spec + Expr hcl.Expression + TransformCtx *hcl.EvalContext + VarName string +} + +func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: wrappedVal, + } + resultVal, resultDiags := s.Expr.Value(chiCtx) + diags = append(diags, resultDiags...) + return resultVal, diags +} + +func (s *TransformExprSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: cty.UnknownVal(wrappedTy), + } + resultVal, _ := s.Expr.Value(chiCtx) + return resultVal.Type() +} + +func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// TransformFuncSpec is a spec that wraps another and then evaluates a given +// cty function with the result. The given function must expect exactly one +// argument, where the result of the wrapped spec will be passed. +// +// The implied type of this spec is determined by type-checking the function +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +// +// If the given function produces an error when run, this spec will produce +// a non-user-actionable diagnostic message. 
It's the caller's responsibility +// to ensure that the given function cannot fail for any non-error result +// of the wrapped spec. +type TransformFuncSpec struct { + Wrapped Spec + Func function.Function +} + +func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) + if err != nil { + // This is not a good example of a diagnostic because it is reporting + // a programming error in the calling application, rather than something + // an end-user could act on. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Transform function failed", + Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), + Subject: s.sourceRange(content, blockLabels).Ptr(), + }) + return cty.UnknownVal(s.impliedType()), diags + } + + return resultVal, diags +} + +func (s *TransformFuncSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) + if err != nil { + // Should never happen with a correctly-configured spec + return cty.DynamicPseudoType + } + + return resultTy +} + +func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// noopSpec is a placeholder spec that does nothing, used in situations where +// a non-nil placeholder spec is required. It is not exported because there is +// no reason to use it directly; it is always an implementation detail only. +type noopSpec struct { +} + +func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return cty.NullVal(cty.DynamicPseudoType), nil +} + +func (s noopSpec) impliedType() cty.Type { + return cty.DynamicPseudoType +} + +func (s noopSpec) visitSameBodyChildren(cb visitFunc) { + // nothing to do +} + +func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No useful range for a noopSpec, and nobody should be calling this anyway. + return hcl.Range{ + Filename: "noopSpec", + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go new file mode 100644 index 000000000..f8440eb60 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcldec/variables.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Variables processes the given body with the given spec and returns a +// list of the variable traversals that would be required to decode +// the same pairing of body and spec. +// +// This can be used to conditionally populate the variables in the EvalContext +// passed to Decode, for applications where a static scope is insufficient. 
+// +// If the given body is not compliant with the given schema, the result may +// be incomplete, but that's assumed to be okay because the eventual call +// to Decode will produce error diagnostics anyway. +func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + var vars []hcl.Traversal + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + if vs, ok := spec.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + + var visitFn visitFunc + visitFn = func(s Spec) { + if vs, ok := s.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + s.visitSameBodyChildren(visitFn) + } + spec.visitSameBodyChildren(visitFn) + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go b/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go new file mode 100644 index 000000000..1a8014480 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcled/doc.go @@ -0,0 +1,4 @@ +// Package hcled provides functionality intended to help an application +// that embeds HCL to deliver relevant information to a text editor or IDE +// for navigating around and analyzing configuration files. +package hcled diff --git a/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go new file mode 100644 index 000000000..050ad758f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hcled/navigation.go @@ -0,0 +1,34 @@ +package hcled + +import ( + "github.com/hashicorp/hcl/v2" +) + +type contextStringer interface { + ContextString(offset int) string +} + +// ContextString returns a string describing the context of the given byte +// offset, if available. An empty string is returned if no such information +// is available, or otherwise the returned string is in a form that depends +// on the language used to write the referenced file. +func ContextString(file *hcl.File, offset int) string { + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} + +type contextDefRanger interface { + ContextDefRange(offset int) hcl.Range +} + +func ContextDefRange(file *hcl.File, offset int) hcl.Range { + if cser, ok := file.Nav.(contextDefRanger); ok { + defRange := cser.ContextDefRange(offset) + if !defRange.Empty() { + return defRange + } + } + return file.Body.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go new file mode 100644 index 000000000..1dc2eccd8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclparse/parser.go @@ -0,0 +1,135 @@ +// Package hclparse has the main API entry point for parsing both HCL native +// syntax and HCL JSON. +// +// The main HCL package also includes SimpleParse and SimpleParseFile which +// can be a simpler interface for the common case where an application just +// needs to parse a single file. The gohcl package simplifies that further +// in its SimpleDecode function, which combines hcl.SimpleParse with decoding +// into Go struct values +// +// Package hclparse, then, is useful for applications that require more fine +// control over parsing or which need to load many separate files and keep +// track of them for possible error reporting or other analysis. +package hclparse + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/hcl/v2/json" +) + +// NOTE: This is the public interface for parsing. 
The actual parsers are +// in other packages alongside this one, with this package just wrapping them +// to provide a unified interface for the caller across all supported formats. + +// Parser is the main interface for parsing configuration files. As well as +// parsing files, a parser also retains a registry of all of the files it +// has parsed so that multiple attempts to parse the same file will return +// the same object and so the collected files can be used when printing +// diagnostics. +// +// Any diagnostics for parsing a file are only returned once on the first +// call to parse that file. Callers are expected to collect up diagnostics +// and present them together, so returning diagnostics for the same file +// multiple times would create a confusing result. +type Parser struct { + files map[string]*hcl.File +} + +// NewParser creates a new parser, ready to parse configuration files. +func NewParser() *Parser { + return &Parser{ + files: map[string]*hcl.File{}, + } +} + +// ParseHCL parses the given buffer (which is assumed to have been loaded from +// the given filename) as a native-syntax configuration file and returns the +// hcl.File object representing it. +func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + p.files[filename] = file + return file, diags +} + +// ParseHCLFile reads the given filename and parses it as a native-syntax HCL +// configuration file. An error diagnostic is returned if the given file +// cannot be read. +func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), + }, + } + } + + return p.ParseHCL(src, filename) +} + +// ParseJSON parses the given JSON buffer (which is assumed to have been loaded +// from the given filename) and returns the hcl.File object representing it. +func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.Parse(src, filename) + p.files[filename] = file + return file, diags +} + +// ParseJSONFile reads the given filename and parses it as JSON, similarly to +// ParseJSON. An error diagnostic is returned if the given file cannot be read. +func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.ParseFile(filename) + p.files[filename] = file + return file, diags +} + +// AddFile allows a caller to record in a parser a file that was parsed some +// other way, thus allowing it to be included in the registry of sources. +func (p *Parser) AddFile(filename string, file *hcl.File) { + p.files[filename] = file +} + +// Sources returns a map from filenames to the raw source code that was +// read from them. This is intended to be used, for example, to print +// diagnostics with contextual information. +// +// The arrays underlying the returned slices should not be modified. 
+func (p *Parser) Sources() map[string][]byte { + ret := make(map[string][]byte) + for fn, f := range p.files { + ret[fn] = f.Bytes + } + return ret +} + +// Files returns a map from filenames to the File objects produced from them. +// This is intended to be used, for example, to print diagnostics with +// contextual information. +// +// The returned map and all of the objects it refers to directly or indirectly +// must not be modified. +func (p *Parser) Files() map[string]*hcl.File { + return p.files +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go new file mode 100644 index 000000000..8c20286b2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go @@ -0,0 +1,23 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" +) + +// setDiagEvalContext is an internal helper that will impose a particular +// EvalContext on a set of diagnostics in-place, for any diagnostic that +// does not already have an EvalContext set. +// +// We generally expect diagnostics to be immutable, but this is safe to use +// on any Diagnostics where none of the contained Diagnostic objects have yet +// been seen by a caller. Its purpose is to apply additional context to a +// set of diagnostics produced by a "deeper" component as the stack unwinds +// during expression evaluation. +func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) { + for _, diag := range diags { + if diag.Expression == nil { + diag.Expression = expr + diag.EvalContext = ctx + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go new file mode 100644 index 000000000..ccc1c0ae2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go @@ -0,0 +1,24 @@ +package hclsyntax + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go new file mode 100644 index 000000000..617bc29dc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go @@ -0,0 +1,7 @@ +// Package hclsyntax contains the parser, AST, etc for HCL's native language, +// as opposed to the JSON variant. +// +// In normal use applications should rarely depend on this package directly, +// instead preferring the higher-level interface of the main hcl package and +// its companion package hclparse. 
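
As an illustrative aside (not part of the vendored files in this patch): a minimal sketch of the hclparse.Parser workflow described above, assuming a hypothetical config.hcl on disk. The parser's file registry is handed to hcl.NewDiagnosticTextWriter so diagnostics can be printed with source context.

package main

import (
	"os"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
)

func main() {
	parser := hclparse.NewParser()

	// ParseHCLFile caches the parsed file, so a second call for the same
	// filename returns the same *hcl.File without re-reporting diagnostics.
	f, diags := parser.ParseHCLFile("config.hcl")

	// The parser's file registry lets the diagnostic writer print annotated
	// source snippets alongside each message.
	wr := hcl.NewDiagnosticTextWriter(os.Stderr, parser.Files(), 80, true)
	if diags.HasErrors() {
		wr.WriteDiagnostics(diags)
		os.Exit(1)
	}

	_ = f // f.Body would typically be passed on to hcldec.Decode or gohcl.DecodeBody
}
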
+package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go new file mode 100644 index 000000000..963ed7752 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go @@ -0,0 +1,1477 @@ +package hclsyntax + +import ( + "fmt" + "sync" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// Expression is the abstract type for nodes that behave as HCL expressions. +type Expression interface { + Node + + // The hcl.Expression methods are duplicated here, rather than simply + // embedded, because both Node and hcl.Expression have a Range method + // and so they conflict. + + Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + Variables() []hcl.Traversal + StartRange() hcl.Range +} + +// Assert that Expression implements hcl.Expression +var assertExprImplExpr hcl.Expression = Expression(nil) + +// LiteralValueExpr is an expression that just always returns a given value. +type LiteralValueExpr struct { + Val cty.Value + SrcRange hcl.Range +} + +func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) { + // Literal values have no child nodes +} + +func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Val, nil +} + +func (e *LiteralValueExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *LiteralValueExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *LiteralValueExpr) AsTraversal() hcl.Traversal { + // This one's a little weird: the contract for AsTraversal is to interpret + // an expression as if it were traversal syntax, and traversal syntax + // doesn't have the special keywords "null", "true", and "false" so these + // are expected to be treated like variables in that case. + // Since our parser already turned them into LiteralValueExpr by the time + // we get here, we need to undo this and infer the name that would've + // originally led to our value. + // We don't do anything for any other values, since they don't overlap + // with traversal roots. + + if e.Val.IsNull() { + // In practice the parser only generates null values of the dynamic + // pseudo-type for literals, so we can safely assume that any null + // was orignally the keyword "null". + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "null", + SrcRange: e.SrcRange, + }, + } + } + + switch e.Val { + case cty.True: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "true", + SrcRange: e.SrcRange, + }, + } + case cty.False: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "false", + SrcRange: e.SrcRange, + }, + } + default: + // No traversal is possible for any other value. + return nil + } +} + +// ScopeTraversalExpr is an Expression that retrieves a value from the scope +// using a traversal. +type ScopeTraversalExpr struct { + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) { + // Scope traversals have no child nodes +} + +func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, diags := e.Traversal.TraverseAbs(ctx) + setDiagEvalContext(diags, e, ctx) + return val, diags +} + +func (e *ScopeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ScopeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. 
+func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal { + return e.Traversal +} + +// RelativeTraversalExpr is an Expression that retrieves a value from another +// value using a _relative_ traversal. +type RelativeTraversalExpr struct { + Source Expression + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) +} + +func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + src, diags := e.Source.Value(ctx) + ret, travDiags := e.Traversal.TraverseRel(src) + setDiagEvalContext(travDiags, e, ctx) + diags = append(diags, travDiags...) + return ret, diags +} + +func (e *RelativeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *RelativeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our source can. + st, diags := hcl.AbsTraversalForExpr(e.Source) + if diags.HasErrors() { + return nil + } + + ret := make(hcl.Traversal, len(st)+len(e.Traversal)) + copy(ret, st) + copy(ret[len(st):], e.Traversal) + return ret +} + +// FunctionCallExpr is an Expression that calls a function from the EvalContext +// and returns its result. +type FunctionCallExpr struct { + Name string + Args []Expression + + // If true, the final argument should be a tuple, list or set which will + // expand to be one argument per element. + ExpandFinal bool + + NameRange hcl.Range + OpenParenRange hcl.Range + CloseParenRange hcl.Range +} + +func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { + for _, arg := range e.Args { + w(arg) + } +} + +func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var f function.Function + exists := false + hasNonNilMap := false + thisCtx := ctx + for thisCtx != nil { + if thisCtx.Functions == nil { + thisCtx = thisCtx.Parent() + continue + } + hasNonNilMap = true + f, exists = thisCtx.Functions[e.Name] + if exists { + break + } + thisCtx = thisCtx.Parent() + } + + if !exists { + if !hasNonNilMap { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + avail := make([]string, 0, len(ctx.Functions)) + for name := range ctx.Functions { + avail = append(avail, name) + } + suggestion := nameSuggestion(e.Name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + params := f.Params() + varParam := f.VarParam() + + args := e.Args + if e.ExpandFinal { + if len(args) < 1 { + // should never happen if the parser is behaving + panic("ExpandFinal set on function call with no arguments") + } + expandExpr := args[len(args)-1] + expandVal, expandDiags := expandExpr.Value(ctx) + diags = append(diags, expandDiags...) 
+ if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +// Implementation for hcl.ExprCall. +func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. + for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Condition) + w(e.TrueResult) + w(e.FalseResult) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + resultType := cty.DynamicPseudoType + convs := make([]convert.Conversion, 2) + + switch { + // If either case is a dynamic null value (which would result from a + // literal null in the config), we know that it can convert to the expected + // type of the opposite case, and we don't need to speculatively reduce the + // final result type to DynamicPseudoType. + + // If we know that either Type is a DynamicPseudoType, we can be certain + // that the other value can convert since it's a pass-through, and we don't + // need to unify the types. If the final evaluation results in the dynamic + // value being returned, there's no conversion we can do, so we return the + // value directly. + case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = falseResult.Type() + convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = trueResult.Type() + convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + case trueResult.Type() == cty.DynamicPseudoType, falseResult.Type() == cty.DynamicPseudoType: + // the final resultType type is still unknown + // we don't need to get the conversion, because both are a noop. + + default: + // Try to find a type that both results can be converted to. 
+ resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + } + + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) + if convs[0] != nil { + var err error + trueResult, err = convs[0](trueResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The true result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.TrueResult, + EvalContext: ctx, + }) + trueResult = cty.UnknownVal(resultType) + } + } + return trueResult, diags + } else { + diags = append(diags, falseDiags...) 
+ if convs[1] != nil { + var err error + falseResult, err = convs[1](falseResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The false result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.FalseResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.FalseResult, + EvalContext: ctx, + }) + falseResult = cty.UnknownVal(resultType) + } + } + return falseResult, diags + } +} + +func (e *ConditionalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ConditionalExpr) StartRange() hcl.Range { + return e.Condition.StartRange() +} + +type IndexExpr struct { + Collection Expression + Key Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { + w(e.Collection) + w(e.Key) +} + +func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + coll, collDiags := e.Collection.Value(ctx) + key, keyDiags := e.Key.Value(ctx) + diags = append(diags, collDiags...) + diags = append(diags, keyDiags...) + + val, indexDiags := hcl.Index(coll, key, &e.SrcRange) + setDiagEvalContext(indexDiags, e, ctx) + diags = append(diags, indexDiags...) + return val, diags +} + +func (e *IndexExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *IndexExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type TupleConsExpr struct { + Exprs []Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) { + for _, expr := range e.Exprs { + w(expr) + } +} + +func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals []cty.Value + var diags hcl.Diagnostics + + vals = make([]cty.Value, len(e.Exprs)) + for i, expr := range e.Exprs { + val, valDiags := expr.Value(ctx) + vals[i] = val + diags = append(diags, valDiags...) + } + + return cty.TupleVal(vals), diags +} + +func (e *TupleConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TupleConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprList +func (e *TupleConsExpr) ExprList() []hcl.Expression { + ret := make([]hcl.Expression, len(e.Exprs)) + for i, expr := range e.Exprs { + ret[i] = expr + } + return ret +} + +type ObjectConsExpr struct { + Items []ObjectConsItem + + SrcRange hcl.Range + OpenRange hcl.Range +} + +type ObjectConsItem struct { + KeyExpr Expression + ValueExpr Expression +} + +func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { + for _, item := range e.Items { + w(item.KeyExpr) + w(item.ValueExpr) + } +} + +func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals map[string]cty.Value + var diags hcl.Diagnostics + + // This will get set to true if we fail to produce any of our keys, + // either because they are actually unknown or if the evaluation produces + // errors. In all of these case we must return DynamicPseudoType because + // we're unable to know the full set of keys our object has, and thus + // we can't produce a complete value of the intended type. + // + // We still evaluate all of the item keys and values to make sure that we + // get as complete as possible a set of diagnostics. 
+ known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.KeyExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprMap +func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair { + ret := make([]hcl.KeyValuePair, len(e.Items)) + for i, item := range e.Items { + ret[i] = hcl.KeyValuePair{ + Key: item.KeyExpr, + Value: item.ValueExpr, + } + } + return ret +} + +// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys, +// which deals with the special case that a naked identifier in that position +// must be interpreted as a literal string rather than evaluated directly. +type ObjectConsKeyExpr struct { + Wrapped Expression + ForceNonLiteral bool +} + +func (e *ObjectConsKeyExpr) literalName() string { + // This is our logic for deciding whether to behave like a literal string. + // We lean on our AbsTraversalForExpr implementation here, which already + // deals with some awkward cases like the expression being the result + // of the keywords "null", "true" and "false" which we'd want to interpret + // as keys here too. + return hcl.ExprAsKeyword(e.Wrapped) +} + +func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { + // We only treat our wrapped expression as a real expression if we're + // not going to interpret it as a literal. + if e.literalName() == "" { + w(e.Wrapped) + } +} + +func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // Because we accept a naked identifier as a literal key rather than a + // reference, it's confusing to accept a traversal containing periods + // here since we can't tell if the user intends to create a key with + // periods or actually reference something. To avoid confusing downstream + // errors we'll just prohibit a naked multi-step traversal here and + // require the user to state their intent more clearly. + // (This is handled at evaluation time rather than parse time because + // an application using static analysis _can_ accept a naked multi-step + // traversal here, if desired.) 
+ if !e.ForceNonLiteral { + if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 { + var diags hcl.Diagnostics + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous attribute key", + Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.", + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + + if ln := e.literalName(); ln != "" { + return cty.StringVal(ln), nil + } + } + return e.Wrapped.Value(ctx) +} + +func (e *ObjectConsKeyExpr) Range() hcl.Range { + return e.Wrapped.Range() +} + +func (e *ObjectConsKeyExpr) StartRange() hcl.Range { + return e.Wrapped.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal { + // If we're forcing a non-literal then we can never be interpreted + // as a traversal. + if e.ForceNonLiteral { + return nil + } + + // We can produce a traversal only if our wrappee can. + st, diags := hcl.AbsTraversalForExpr(e.Wrapped) + if diags.HasErrors() { + return nil + } + + return st +} + +func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { + return e.Wrapped +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // null if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...) + + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. 
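
As an illustrative aside (not part of the vendored files in this patch): a minimal sketch of the 'for' expression and 'if' clause evaluation handled in the surrounding code, seen from a calling application. The expression text and variable names are hypothetical; the entry points (hclsyntax.ParseExpression, hcl.EvalContext, the cty stdlib upper function) are the public API.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	// A tuple-producing 'for' expression with an 'if' clause and a function call.
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`[for name in names : upper(name) if name != "skip"]`),
		"example.hcl",
		hcl.Pos{Line: 1, Column: 1, Byte: 0},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"names": cty.ListVal([]cty.Value{
				cty.StringVal("web"), cty.StringVal("skip"), cty.StringVal("db"),
			}),
		},
		Functions: map[string]function.Function{
			"upper": stdlib.UpperFunc,
		},
	}

	val, moreDiags := expr.Value(ctx)
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	fmt.Println(val.GoString()) // a tuple containing "WEB" and "DB"
}
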
+ if e.CondExpr != nil { + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) 
+ if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) 
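// A stand-alone sketch (editor-added, not vendored code) of the
// object-producing branch of ForExpr above, including the "if" filter and the
// ellipsis grouping mode. The variable name "names" is an arbitrary example.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    src := `{ for n in names : n => n... if n != "skip" }`
    expr, diags := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    ctx := &hcl.EvalContext{
        Variables: map[string]cty.Value{
            "names": cty.TupleVal([]cty.Value{
                cty.StringVal("a"), cty.StringVal("b"),
                cty.StringVal("a"), cty.StringVal("skip"),
            }),
        },
    }

    val, diags := expr.Value(ctx)
    if diags.HasErrors() {
        panic(diags.Error())
    }
    // Duplicate keys are grouped because of the "...", so the result is an
    // object like { a = ["a", "a"], b = ["b"] }; "skip" is filtered out.
    fmt.Println(val.GoString())
}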
+ vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + w(e.CollExpr) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. + _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) + return cty.DynamicVal, diags + } + + sourceTy := sourceVal.Type() + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-item tuple, or as + // an empty tuple if the value is null. + autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType()) + + if sourceVal.IsNull() { + if autoUpgrade { + return cty.EmptyTupleVal, diags + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + Expression: e.Source, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + + if autoUpgrade { + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) + sourceTy = sourceVal.Type() + } + + // We'll compute our result type lazily if we need it. In the normal case + // it's inferred automatically from the value we construct. + resultTy := func() (cty.Type, hcl.Diagnostics) { + chiCtx := ctx.NewChild() + var diags hcl.Diagnostics + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + ety := sourceTy.ElementType() + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + return cty.List(val.Type()), diags + case sourceTy.IsTupleType(): + etys := sourceTy.TupleElementTypes() + resultTys := make([]cty.Type, 0, len(etys)) + for _, ety := range etys { + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) 
+ e.Item.clearValue(chiCtx) // clean up our temporary value + resultTys = append(resultTys, val.Type()) + } + return cty.Tuple(resultTys), diags + default: + // Should never happen because of our promotion to list above. + return cty.DynamicPseudoType, diags + } + } + + if !sourceVal.IsKnown() { + // We can't produce a known result in this case, but we'll still + // indicate what the result type would be, allowing any downstream type + // checking to proceed. + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.UnknownVal(ty), diags + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + // We'll ingore the resultTy diagnostics in this case since they + // will just be the same errors we saw while iterating above. + ty, _ := resultTy() + return cty.UnknownVal(ty), diags + } + + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + if len(vals) == 0 { + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.ListValEmpty(ty.ElementType()), diags + } + return cty.ListVal(vals), diags + default: + return cty.TupleVal(vals), diags + } +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) + w(e.Each) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime. +// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + + // values and its associated lock are used to isolate concurrent + // evaluations of a symbol from one another. It is the calling application's + // responsibility to ensure that the same splat expression is not evalauted + // concurrently within the _same_ EvalContext, but it is fine and safe to + // do cuncurrent evaluations with distinct EvalContexts. + values map[*hcl.EvalContext]cty.Value + valuesLock sync.RWMutex +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + + e.valuesLock.RLock() + defer e.valuesLock.RUnlock() + + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. 
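// A stand-alone sketch (editor-added, not vendored code) of SplatExpr as
// implemented above, which uses the AnonSymbolExpr placeholder for each
// element of the source value. The "users" variable is an arbitrary example.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    expr, diags := hclsyntax.ParseExpression([]byte(`users[*].name`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    ctx := &hcl.EvalContext{
        Variables: map[string]cty.Value{
            "users": cty.TupleVal([]cty.Value{
                cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("ada")}),
                cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("grace")}),
            }),
        },
    }

    val, diags := expr.Value(ctx)
    if diags.HasErrors() {
        panic(diags.Error())
    }
    fmt.Println(val.GoString()) // a tuple: ["ada", "grace"]

    // Per the auto-upgrade rule above, applying [*] to a single non-sequence,
    // non-null value yields a one-element tuple, and applying it to a null
    // value yields an empty tuple.
}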
+func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go new file mode 100644 index 000000000..c1db0cecc --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go @@ -0,0 +1,268 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ + Impl: stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. 
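// A stand-alone sketch (editor-added, not vendored code) of the behavior
// driven by the operation table assigned below: precedence groups are listed
// lowest-first, and operands are converted with cty's convert package before
// the backing stdlib function (e.g. stdlib.AddFunc) is called.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    for _, src := range []string{
        `1 + 2 * 3 == 7`, // parses as ((1 + (2 * 3)) == 7) => cty.True
        `"2" + 3`,        // left operand converted from string to number => 5
    } {
        expr, diags := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            panic(diags.Error())
        }
        val, diags := expr.Value(nil)
        if diags.HasErrors() {
            panic(diags.Error())
        }
        fmt.Printf("%s => %s\n", src, val.GoString())
    }
}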
+ binaryOps = []map[TokenType]*Operation{ + { + TokenOr: OpLogicalOr, + }, + { + TokenAnd: OpLogicalAnd, + }, + { + TokenEqualOp: OpEqual, + TokenNotEqual: OpNotEqual, + }, + { + TokenGreaterThan: OpGreaterThan, + TokenGreaterThanEq: OpGreaterThanOrEqual, + TokenLessThan: OpLessThan, + TokenLessThanEq: OpLessThanOrEqual, + }, + { + TokenPlus: OpAdd, + TokenMinus: OpSubtract, + }, + { + TokenStar: OpMultiply, + TokenSlash: OpDivide, + TokenPercent: OpModulo, + }, + } +} + +type BinaryOpExpr struct { + LHS Expression + Op *Operation + RHS Expression + + SrcRange hcl.Range +} + +func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.LHS) + w(e.RHS) +} + +func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly two arguments + params := impl.Params() + lhsParam := params[0] + rhsParam := params[1] + + var diags hcl.Diagnostics + + givenLHSVal, lhsDiags := e.LHS.Value(ctx) + givenRHSVal, rhsDiags := e.RHS.Value(ctx) + diags = append(diags, lhsDiags...) + diags = append(diags, rhsDiags...) + + lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.LHS, + EvalContext: ctx, + }) + } + rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.RHS, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{lhsVal, rhsVal} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *BinaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *BinaryOpExpr) StartRange() hcl.Range { + return e.LHS.StartRange() +} + +type UnaryOpExpr struct { + Op *Operation + Val Expression + + SrcRange hcl.Range + SymbolRange hcl.Range +} + +func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.Val) +} + +func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly one argument + params := impl.Params() + param := params[0] + + givenVal, diags := e.Val.Value(ctx) + + val, err := convert.Convert(givenVal, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Val, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. 
+ return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{val} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *UnaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *UnaryOpExpr) StartRange() hcl.Range { + return e.SymbolRange +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go new file mode 100644 index 000000000..9d425115f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go @@ -0,0 +1,220 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type TemplateExpr struct { + Parts []Expression + + SrcRange hcl.Range +} + +func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { + for _, part := range e.Parts { + w(part) + } +} + +func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + buf := &bytes.Buffer{} + var diags hcl.Diagnostics + isKnown := true + + for _, part := range e.Parts { + partVal, partDiags := part.Value(ctx) + diags = append(diags, partDiags...) + + if partVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "The expression result is null. Cannot include a null value in a string template.", + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + if !partVal.IsKnown() { + // If any part is unknown then the result as a whole must be + // unknown too. We'll keep on processing the rest of the parts + // anyway, because we want to still emit any diagnostics resulting + // from evaluating those. + isKnown = false + continue + } + + strVal, err := convert.Convert(partVal, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include the given value in a string template: %s.", + err.Error(), + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + buf.WriteString(strVal.AsString()) + } + + if !isKnown { + return cty.UnknownVal(cty.String), diags + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateExpr) StartRange() hcl.Range { + return e.Parts[0].StartRange() +} + +// IsStringLiteral returns true if and only if the template consists only of +// single string literal, as would be created for a simple quoted string like +// "foo". +// +// If this function returns true, then calling Value on the same expression +// with a nil EvalContext will return the literal value. 
+// +// Note that "${"foo"}", "${1}", etc aren't considered literal values for the +// purposes of this method, because the intent of this method is to identify +// situations where the user seems to be explicitly intending literal string +// interpretation, not situations that result in literals as a technicality +// of the template expression unwrapping behavior. +func (e *TemplateExpr) IsStringLiteral() bool { + if len(e.Parts) != 1 { + return false + } + _, ok := e.Parts[0].(*LiteralValueExpr) + return ok +} + +// TemplateJoinExpr is used to convert tuples of strings produced by template +// constructs (i.e. for loops) into flat strings, by converting the values +// tos strings and joining them. This AST node is not used directly; it's +// produced as part of the AST of a "for" loop in a template. +type TemplateJoinExpr struct { + Tuple Expression +} + +func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { + w(e.Tuple) +} + +func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + tuple, diags := e.Tuple.Value(ctx) + + if tuple.IsNull() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null. Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. 
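// A stand-alone sketch (editor-added, not vendored code) contrasting
// TemplateWrapExpr and TemplateExpr as described above: a quoted template that
// is exactly one interpolation passes its value through unconverted, while a
// mixed template produces a string. "count" is an arbitrary example variable.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    ctx := &hcl.EvalContext{
        Variables: map[string]cty.Value{"count": cty.NumberIntVal(5)},
    }

    for _, src := range []string{
        `"${count}"`,      // sole interpolation: the result stays a number
        `"n is ${count}"`, // literal parts present: the result is the string "n is 5"
    } {
        expr, diags := hclsyntax.ParseExpression([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
        if diags.HasErrors() {
            panic(diags.Error())
        }
        val, diags := expr.Value(ctx)
        if diags.HasErrors() {
            panic(diags.Error())
        }
        fmt.Printf("%s => %s\n", src, val.GoString())
    }
}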
+type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + w(e.Wrapped) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go new file mode 100644 index 000000000..a82bf790e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go @@ -0,0 +1,76 @@ +package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. + +import ( + "github.com/hashicorp/hcl/v2" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go new file mode 100644 index 000000000..f55e9ce2c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go @@ -0,0 +1,20 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" +) + +// File is the top-level object resulting from parsing a configuration file. 
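// A short sketch (editor-added, not vendored code): every expression type
// above implements Variables(), so an application can discover which root
// names an expression refers to before building an EvalContext for it.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    expr, diags := hclsyntax.ParseExpression(
        []byte(`a + b[0].c`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }
    for _, traversal := range expr.Variables() {
        fmt.Println(traversal.RootName()) // prints "a", then "b"
    }
}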
+type File struct { + Body *Body + Bytes []byte +} + +func (f *File) AsHCLFile() *hcl.File { + return &hcl.File{ + Body: f.Body, + Bytes: f.Bytes, + + // TODO: The Nav object, once we have an implementation of it + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go new file mode 100644 index 000000000..841656a6a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go @@ -0,0 +1,9 @@ +package hclsyntax + +//go:generate go run expression_vars_gen.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl +//go:generate ragel -Z scan_tokens.rl +//go:generate gofmt -w scan_tokens.go +//go:generate ragel -Z scan_string_lit.rl +//go:generate gofmt -w scan_string_lit.go +//go:generate stringer -type TokenType -output token_type_string.go diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go new file mode 100644 index 000000000..eef8b9626 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go @@ -0,0 +1,21 @@ +package hclsyntax + +import ( + "bytes" +) + +type Keyword []byte + +var forKeyword = Keyword([]byte{'f', 'o', 'r'}) +var inKeyword = Keyword([]byte{'i', 'n'}) +var ifKeyword = Keyword([]byte{'i', 'f'}) +var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'}) +var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'}) +var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'}) + +func (kw Keyword) TokenMatches(token Token) bool { + if token.Type != TokenIdent { + return false + } + return bytes.Equal([]byte(kw), token.Bytes) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go new file mode 100644 index 000000000..af98ef045 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go @@ -0,0 +1,59 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" +) + +type navigation struct { + root *Body +} + +// Implementation of hcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! + return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} + +func (n navigation) ContextDefRange(offset int) hcl.Range { + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return hcl.Range{} + } + + return block.DefRange() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go new file mode 100644 index 000000000..41b35e53f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Node is the abstract type that every AST node implements. 
+// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. + // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go new file mode 100644 index 000000000..6fb284a8f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go @@ -0,0 +1,2054 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. + recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The argument %q was already set at %s. Each argument may be set only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. + // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. 
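// A stand-alone sketch (editor-added, not vendored code) of ParseBody as
// driven by the public ParseConfig entry point: attributes and blocks are
// collected, and repeating an argument name triggers the "Attribute
// redefined" diagnostic produced above. The names used are arbitrary examples.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    src := `
service "web" {
  port = 8080
}
port = 1
port = 2
`
    f, diags := hclsyntax.ParseConfig([]byte(src), "example.hcl", hcl.Pos{Line: 1, Column: 1})
    fmt.Println(diags.HasErrors(), diags.Error()) // true, mentions "Attribute redefined"

    body := f.Body.(*hclsyntax.Body)
    for name := range body.Attributes {
        fmt.Println("attribute:", name)
    }
    for _, block := range body.Blocks {
        fmt.Println("block:", block.Type, block.Labels)
    }
}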
+ continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid argument name", + Detail: "Argument names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident, false) + case TokenOQuote, TokenOBrace, TokenIdent: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +// parseSingleAttrBody is a weird variant of ParseBody that deals with the +// body of a nested block containing only one attribute value all on a single +// line, like foo { bar = baz } . It expects to find a single attribute item +// immediately followed by the end token type with no intervening newlines. +func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + var attr *Attribute + var diags hcl.Diagnostics + + next := p.Peek() + + switch next.Type { + case TokenEqual: + node, attrDiags := p.finishParsingBodyAttribute(ident, true) + diags = append(diags, attrDiags...) + attr = node.(*Attribute) + case TokenOQuote, TokenOBrace, TokenIdent: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument definition required", + Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes), + Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(), + }, + } + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. 
To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return &Body{ + Attributes: Attributes{ + string(ident.Bytes): attr, + }, + + SrcRange: attr.SrcRange, + EndRange: hcl.Range{ + Filename: attr.SrcRange.Filename, + Start: attr.SrcRange.End, + End: attr.SrcRange.End, + }, + }, diags + +} + +func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + if !singleLine { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + summary := "Missing newline after argument" + detail := "An argument definition must end with a newline." + + if end.Type == TokenComma { + summary = "Unexpected comma after argument" + detail = "Argument definitions must be separated by newlines, not commas. " + detail + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: summary, + Detail: detail, + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + // parseQuoteStringLiteral recovers up to the closing quote + // if it encounters problems, so we can continue looking for + // more labels and eventually the block body even. 
+ + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: &Body{ + SrcRange: ident.Range, + EndRange: ident.Range, + }, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + var body *Body + var bodyDiags hcl.Diagnostics + switch p.Peek().Type { + case TokenNewline, TokenEOF, TokenCBrace: + body, bodyDiags = p.ParseBody(TokenCBrace) + default: + // Special one-line, single-attribute block parsing mode. + body, bodyDiags = p.parseSingleAttrBody(TokenCBrace) + switch p.Peek().Type { + case TokenCBrace: + p.Read() // the happy path - just consume the closing brace + case TokenComma: + // User seems to be trying to use the object-constructor + // comma-separated style, which isn't permitted for blocks. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + case TokenNewline: + // We don't allow weird mixtures of single and multi-line syntax. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + default: + // Some other weird thing is going on. Since we can't guess a likely + // user intent for this one, we'll skip it if we're already in + // recovery mode. 
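// A stand-alone sketch (editor-added, not vendored code) of the single-line
// block handling above: one argument on the block's opening line is accepted,
// while a second, comma-separated argument produces the "Invalid
// single-argument block definition" diagnostic.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    ok := []byte("settings { enabled = true }\n")
    bad := []byte("settings { enabled = true, retries = 3 }\n")

    _, diags := hclsyntax.ParseConfig(ok, "example.hcl", hcl.Pos{Line: 1, Column: 1})
    fmt.Println(diags.HasErrors()) // false

    _, diags = hclsyntax.ParseConfig(bad, "example.hcl", hcl.Pos{Line: 1, Column: 1})
    fmt.Println(diags.HasErrors()) // true: comma-separated arguments are rejected
}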
+ if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenCBrace) + } + } + diags = append(diags, bodyDiags...) + cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline || eol.Type == TokenEOF { + p.Read() // eat newline + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + p.recoverAfterBodyItem() + } + + // We must never produce a nil body, since the caller may attempt to + // do analysis of a partial result when there's an error, so we'll + // insert a placeholder if we otherwise failed to produce a valid + // body due to one of the syntax error paths above. + if body == nil && diags.HasErrors() { + body = &Body{ + SrcRange: hcl.RangeBetween(oBrace.Range, cBraceRange), + EndRange: cBraceRange, + } + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) 
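// A short sketch (editor-added, not vendored code) of the conditional
// operator parsed by parseTernaryConditional above, evaluated through the
// public API. "age" is an arbitrary example variable.
package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
    "github.com/zclconf/go-cty/cty"
)

func main() {
    expr, diags := hclsyntax.ParseExpression(
        []byte(`age >= 18 ? "adult" : "minor"`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"age": cty.NumberIntVal(21)}}
    val, diags := expr.Value(ctx)
    if diags.HasErrors() {
        panic(diags.Error())
    }
    fmt.Println(val.AsString()) // adult
}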
+ if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) + if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret, moreDiags := p.parseExpressionTraversals(term) + diags = append(diags, moreDiags...) + return ret, diags +} + +func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := from + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. 
+ numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) + + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
+ trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + switch p.Peek().Type { + case TokenStar: + // This is a full splat expression, like foo[*], which consumes + // the rest of the traversal steps after it using a recursive + // call to this function. + p.Read() // consume star + close := p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on splat index", + Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + // Splat expressions use a special "anonymous symbol" as a + // placeholder in an expression to be evaluated once for each + // item in the source expression. + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(open.Range, close.Range), + } + // Now we'll recursively call this same function to eat any + // remaining traversal steps against the anonymous symbol. + travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr) + diags = append(diags, nestedDiags...) 
+ + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()), + MarkerRange: hcl.RangeBetween(open.Range, close.Range), + } + + default: + + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() { + litKey, _ := tmpl.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. +func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. 
+ p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open)) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. -46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. 
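+ // cty.DynamicVal is an unknown value of the dynamic pseudo-type, so
+ // later evaluation of this placeholder should not stack further type
+ // errors on top of the parse error reported above.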
+ return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // The cty.ParseNumberVal is always the same behavior as converting a + // string to a number, ensuring we always interpret decimal numbers in + // the same way. + numVal, err := cty.ParseNumberVal(string(tok.Bytes)) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. + p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. 
+ closeTok = p.Read() // eat closing paren
+ break Token
+ }
+
+ }
+
+ p.PopIncludeNewlines()
+
+ return &FunctionCallExpr{
+ Name: string(name.Bytes),
+ Args: args,
+
+ ExpandFinal: expandFinal,
+
+ NameRange: name.Range,
+ OpenParenRange: openTok.Range,
+ CloseParenRange: closeTok.Range,
+ }, diags
+}
+
+func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) {
+ open := p.Read()
+ if open.Type != TokenOBrack {
+ // Should never happen if callers are behaving
+ panic("parseTupleCons called without peeker pointing to open bracket")
+ }
+
+ p.PushIncludeNewlines(false)
+ defer p.PopIncludeNewlines()
+
+ if forKeyword.TokenMatches(p.Peek()) {
+ return p.finishParsingForExpr(open)
+ }
+
+ var close Token
+
+ var diags hcl.Diagnostics
+ var exprs []Expression
+
+ for {
+ next := p.Peek()
+ if next.Type == TokenCBrack {
+ close = p.Read() // eat closer
+ break
+ }
+
+ expr, exprDiags := p.ParseExpression()
+ exprs = append(exprs, expr)
+ diags = append(diags, exprDiags...)
+
+ if p.recovery && exprDiags.HasErrors() {
+ // If expression parsing failed then we are probably in a strange
+ // place in the token stream, so we'll bail out and try to reset
+ // to after our closing bracket to allow parsing to continue.
+ close = p.recover(TokenCBrack)
+ break
+ }
+
+ next = p.Peek()
+ if next.Type == TokenCBrack {
+ close = p.Read() // eat closer
+ break
+ }
+
+ if next.Type != TokenComma {
+ if !p.recovery {
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing item separator",
+ Detail: "Expected a comma to mark the beginning of the next item.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ }
+ close = p.recover(TokenCBrack)
+ break
+ }
+
+ p.Read() // eat comma
+
+ }
+
+ return &TupleConsExpr{
+ Exprs: exprs,
+
+ SrcRange: hcl.RangeBetween(open.Range, close.Range),
+ OpenRange: open.Range,
+ }, diags
+}
+
+func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
+ open := p.Read()
+ if open.Type != TokenOBrace {
+ // Should never happen if callers are behaving
+ panic("parseObjectCons called without peeker pointing to open brace")
+ }
+
+ // We must temporarily stop looking at newlines here while we check for
+ // a "for" keyword, since for expressions are _not_ newline-sensitive,
+ // even though object constructors are.
+ p.PushIncludeNewlines(false)
+ isFor := forKeyword.TokenMatches(p.Peek())
+ p.PopIncludeNewlines()
+ if isFor {
+ return p.finishParsingForExpr(open)
+ }
+
+ p.PushIncludeNewlines(true)
+ defer p.PopIncludeNewlines()
+
+ var close Token
+
+ var diags hcl.Diagnostics
+ var items []ObjectConsItem
+
+ for {
+ next := p.Peek()
+ if next.Type == TokenNewline {
+ p.Read() // eat newline
+ continue
+ }
+
+ if next.Type == TokenCBrace {
+ close = p.Read() // eat closer
+ break
+ }
+
+ // Wrapping parens are not explicitly represented in the AST, but
+ // we want to use them here to disambiguate interpreting a mapping
+ // key as a full expression rather than just a name, and so
+ // we'll remember this was present and use it to force the
+ // behavior of our final ObjectConsKeyExpr.
+ forceNonLiteral := (p.Peek().Type == TokenOParen)
+
+ var key Expression
+ var keyDiags hcl.Diagnostics
+ key, keyDiags = p.ParseExpression()
+ diags = append(diags, keyDiags...)
+
+ if p.recovery && keyDiags.HasErrors() {
+ // If expression parsing failed then we are probably in a strange
+ // place in the token stream, so we'll bail out and try to reset
+ // to after our closing brace to allow parsing to continue.
+ close = p.recover(TokenCBrace)
+ break
+ }
+
+ // We wrap up the key expression in a special wrapper that deals
+ // with our special case that naked identifiers as object keys
+ // are interpreted as literal strings.
+ key = &ObjectConsKeyExpr{
+ Wrapped: key,
+ ForceNonLiteral: forceNonLiteral,
+ }
+
+ next = p.Peek()
+ if next.Type != TokenEqual && next.Type != TokenColon {
+ if !p.recovery {
+ switch next.Type {
+ case TokenNewline, TokenComma:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing attribute value",
+ Detail: "Expected an attribute value, introduced by an equals sign (\"=\").",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ case TokenIdent:
+ // Although this might just be a plain old missing equals
+ // sign before a reference, one way to get here is to try
+ // to write an attribute name containing a period followed
+ // by a digit, which was valid in HCL1, like this:
+ // foo1.2_bar = "baz"
+ // We can't know exactly what the user intended here, but
+ // we'll augment our message with an extra hint in this case,
+ // in case it is helpful.
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing key/value separator",
+ Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to give an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ default:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing key/value separator",
+ Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ }
+ }
+ close = p.recover(TokenCBrace)
+ break
+ }
+
+ p.Read() // eat equals sign or colon
+
+ value, valueDiags := p.ParseExpression()
+ diags = append(diags, valueDiags...)
+
+ if p.recovery && valueDiags.HasErrors() {
+ // If expression parsing failed then we are probably in a strange
+ // place in the token stream, so we'll bail out and try to reset
+ // to after our closing brace to allow parsing to continue.
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires the 'in' keyword after its name declarations.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires a colon after the collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := ParseStringLiteralToken(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "%" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + + // Now that we're returning an error callers won't attempt to use + // the result for any real operations, but they might try to use + // the partial AST for other analyses, so we'll leave a marker + // to indicate that there was something invalid in the string to + // help avoid misinterpretation of the partial result + ret.WriteString(which) + ret.WriteString("{ ... }") + + p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenCQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// ParseStringLiteralToken processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
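+//
+// A minimal usage sketch (the token contents here are illustrative only):
+//
+//    tok := Token{Type: TokenQuotedLit, Bytes: []byte(`a\tb`)}
+//    s, diags := ParseStringLiteralToken(tok)
+//    // s now contains a real tab in place of the \t escape, and diags
+//    // is empty for valid input like this.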
+func ParseStringLiteralToken(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("ParseStringLiteralToken can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + slices := scanStringLit(tok.Bytes, quoted) + + // We will mutate rng constantly as we walk through our token slices below. + // Any diagnostics must take a copy of this rng rather than simply pointing + // to it, e.g. by using rng.Ptr() rather than &rng. + rng := tok.Range + rng.End = rng.Start + +Slices: + for _, slice := range slices { + if len(slice) == 0 { + continue + } + + // Advance the start of our range to where the previous token ended + rng.Start = rng.End + + // Advance the end of our range to after our token. + b := slice + for len(b) > 0 { + adv, ch, _ := textseg.ScanGraphemeClusters(b, true) + rng.End.Byte += adv + switch ch[0] { + case '\r', '\n': + rng.End.Line++ + rng.End.Column = 1 + default: + rng.End.Column++ + } + b = b[adv:] + } + + TokenType: + switch slice[0] { + case '\\': + if !quoted { + // If we're not in quoted mode then just treat this token as + // normal. (Slices can still start with backslash even if we're + // not specifically looking for backslash sequences.) + break TokenType + } + if len(slice) < 2 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "Backslash must be followed by an escape sequence selector character.", + Subject: rng.Ptr(), + }) + break TokenType + } + + switch slice[1] { + + case 'n': + ret = append(ret, '\n') + continue Slices + case 'r': + ret = append(ret, '\r') + continue Slices + case 't': + ret = append(ret, '\t') + continue Slices + case '"': + ret = append(ret, '"') + continue Slices + case '\\': + ret = append(ret, '\\') + continue Slices + case 'u', 'U': + if slice[1] == 'u' && len(slice) != 6 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\u escape sequence must be followed by four hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } else if slice[1] == 'U' && len(slice) != 10 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } + + numHex := string(slice[2:]) + num, err := strconv.ParseUint(numHex, 16, 32) + if err != nil { + // Should never happen because the scanner won't match + // a sequence of digits that isn't valid. + panic(err) + } + + r := rune(num) + l := utf8.RuneLen(r) + if l == -1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num), + Subject: rng.Ptr(), + }) + break TokenType + } + for i := 0; i < l; i++ { + ret = append(ret, 0) + } + rb := ret[len(ret)-l:] + utf8.EncodeRune(rb, r) + + continue Slices + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]), + Subject: rng.Ptr(), + }) + ret = append(ret, slice[1:]...) 
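+ // Keep the unrecognized selector text (minus the backslash) in the
+ // result so that analyses of the partial string still see the
+ // original characters alongside the error reported above.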
+ continue Slices + } + + case '$', '%': + if len(slice) != 3 { + // Not long enough to be our escape sequence, so it's literal. + break TokenType + } + + if slice[1] == slice[0] && slice[2] == '{' { + ret = append(ret, slice[0]) + ret = append(ret, '{') + continue Slices + } + + break TokenType + } + + // If we fall out here or break out of here from the switch above + // then this slice is just a literal. + ret = append(ret, slice...) + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. +// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. 
+ p.recover(end)
+}
+
+func (p *parser) recoverAfterBodyItem() {
+ p.recovery = true
+ var open []TokenType
+
+Token:
+ for {
+ tok := p.Read()
+
+ switch tok.Type {
+
+ case TokenNewline:
+ if len(open) == 0 {
+ break Token
+ }
+
+ case TokenEOF:
+ break Token
+
+ case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl:
+ open = append(open, tok.Type)
+
+ case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc:
+ opener := p.oppositeBracket(tok.Type)
+ for len(open) > 0 && open[len(open)-1] != opener {
+ open = open[:len(open)-1]
+ }
+ if len(open) > 0 {
+ open = open[:len(open)-1]
+ }
+
+ case TokenTemplateSeqEnd:
+ for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl {
+ open = open[:len(open)-1]
+ }
+ if len(open) > 0 {
+ open = open[:len(open)-1]
+ }
+
+ }
+ }
+}
+
+// oppositeBracket finds the bracket that opposes the given bracketer, or
+// TokenNil if the given token isn't a bracketer.
+//
+// "Bracketer", for the sake of this function, is one end of a matching
+// open/close set of tokens that establish a bracketing context.
+func (p *parser) oppositeBracket(ty TokenType) TokenType {
+ switch ty {
+
+ case TokenOBrace:
+ return TokenCBrace
+ case TokenOBrack:
+ return TokenCBrack
+ case TokenOParen:
+ return TokenCParen
+ case TokenOQuote:
+ return TokenCQuote
+ case TokenOHeredoc:
+ return TokenCHeredoc
+
+ case TokenCBrace:
+ return TokenOBrace
+ case TokenCBrack:
+ return TokenOBrack
+ case TokenCParen:
+ return TokenOParen
+ case TokenCQuote:
+ return TokenOQuote
+ case TokenCHeredoc:
+ return TokenOHeredoc
+
+ case TokenTemplateControl:
+ return TokenTemplateSeqEnd
+ case TokenTemplateInterp:
+ return TokenTemplateSeqEnd
+ case TokenTemplateSeqEnd:
+ // This is ambiguous, but we return Interp here because that's
+ // what's assumed by the "recover" method.
+ return TokenTemplateInterp + + default: + return TokenNil + } +} + +func errPlaceholderExpr(rng hcl.Range) Expression { + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: rng, + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go new file mode 100644 index 000000000..eb8f7ea38 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go @@ -0,0 +1,799 @@ +package hclsyntax + +import ( + "fmt" + "strings" + "unicode" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { + return p.parseTemplate(TokenEOF, false) +} + +func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc) + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: rng, + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: rng, + }, diags +} + +func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { + parts, diags := p.parseTemplateParts(end) + if flushHeredoc { + flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec + } + tp := templateParser{ + Tokens: parts.Tokens, + SrcRange: parts.SrcRange, + } + exprs, exprsDiags := tp.parseRoot() + diags = append(diags, exprsDiags...) + + passthru := false + if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token + if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp { + passthru = true + } + } + + return exprs, passthru, parts.SrcRange, diags +} + +type templateParser struct { + Tokens []templateToken + SrcRange hcl.Range + + pos int +} + +func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) { + var exprs []Expression + var diags hcl.Diagnostics + + for { + next := p.Peek() + if _, isEnd := next.(*templateEndToken); isEnd { + break + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. 
+ Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
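+ // Accumulate the sub-expression into whichever branch is currently
+ // being built: the "if" body, or the "else" body once an else
+ // directive has been seen.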
+ *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := ParseStringLiteralToken(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is %{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
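+ // Only the for directive's header is recorded here; the loop body is
+ // made of the tokens that follow, up to the matching endfor, and is
+ // assembled later by the template parser's parseFor.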
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +// flushHeredocTemplateParts modifies in-place the line-leading literal strings +// to apply the flush heredoc processing rule: find the line with the smallest +// number of whitespace characters as prefix and then trim that number of +// characters from all of the lines. +// +// This rule is applied to static tokens rather than to the rendered result, +// so interpolating a string with leading whitespace cannot affect the chosen +// prefix length. 
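+//
+// For example (illustrative only), if the literal lines of a heredoc begin
+// with two and four spaces respectively, the common prefix is two spaces,
+// so "  foo\n" becomes "foo\n" and "    bar\n" becomes "  bar\n".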
+func flushHeredocTemplateParts(parts *templateParts) { + if len(parts.Tokens) == 0 { + // Nothing to do + return + } + + const maxInt = int((^uint(0)) >> 1) + + minSpaces := maxInt + newline := true + var adjust []*templateLiteralToken + for _, ttok := range parts.Tokens { + if newline { + newline = false + var spaces int + if lit, ok := ttok.(*templateLiteralToken); ok { + orig := lit.Val + trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace) + // If a token is entirely spaces and ends with a newline + // then it's a "blank line" and thus not considered for + // space-prefix-counting purposes. + if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") { + spaces = maxInt + } else { + spaceBytes := len(lit.Val) - len(trimmed) + spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters) + adjust = append(adjust, lit) + } + } else if _, ok := ttok.(*templateEndToken); ok { + break // don't process the end token since it never has spaces before it + } + if spaces < minSpaces { + minSpaces = spaces + } + } + if lit, ok := ttok.(*templateLiteralToken); ok { + if strings.HasSuffix(lit.Val, "\n") { + newline = true // The following token, if any, begins a new line + } + } + } + + for _, lit := range adjust { + // Since we want to count space _characters_ rather than space _bytes_, + // we can't just do a straightforward slice operation here and instead + // need to hunt for the split point with a scanner. + valBytes := []byte(lit.Val) + spaceByteCount := 0 + for i := 0; i < minSpaces; i++ { + adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true) + spaceByteCount += adv + valBytes = valBytes[adv:] + } + lit.Val = lit.Val[spaceByteCount:] + lit.SrcRange.Start.Column += minSpaces + lit.SrcRange.Start.Byte += spaceByteCount + } +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
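+//
+// For example, a template like "Hello, ${name}!" is raised to a
+// templateLiteralToken, a templateInterpToken, another
+// templateLiteralToken, and a synthetic trailing templateEndToken before
+// being assembled into a TemplateExpr.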
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go new file mode 100644 index 000000000..7dcb0fd34 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
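+//
+// For example (illustrative input), parsing foo.bar[0]["baz"] produces a
+// traversal made of hcl.TraverseRoot, hcl.TraverseAttr, and two
+// hcl.TraverseIndex steps, in that order.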
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go new file mode 100644 index 000000000..f056f906e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go @@ -0,0 +1,212 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// This is set to true at init() time in tests, to enable more useful output +// if a stack discipline error is detected. It should not be enabled in +// normal mode since there is a performance penalty from accessing the +// runtime stack to produce the traces, but could be temporarily set to +// true for debugging if desired. +var tracePeekerNewlinesStack = false + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool + + // used only when tracePeekerNewlinesStack is set + newlineStackChanges []peekerNewlineStackChange +} + +// for use in debugging the stack usage only +type peekerNewlineStackChange struct { + Pushing bool // if false, then popping + Frame runtime.Frame + Include bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. 
When we're filtering out comments, we must as a + // special case transform these to newline tokens in order + // to properly parse newline-terminated block items. + + if p.includingNewlines() { + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + fakeNewline := Token{ + Type: TokenNewline, + Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)], + + // We use the whole token range as the newline + // range, even though that's a little... weird, + // because otherwise we'd need to go count + // characters again in order to figure out the + // column of the newline, and that complexity + // isn't justified when ranges of newlines are + // so rarely printed anyway. + Range: tok.Range, + } + return fakeNewline, i + 1 + } + } + + continue + } + case TokenNewline: + if !p.includingNewlines() { + continue + } + } + + return tok, i + 1 + } + + // if we fall out here then we'll return the EOF token, and leave + // our index pointed off the end of the array so we'll keep + // returning EOF in future too. + return p.Tokens[len(p.Tokens)-1], len(p.Tokens) +} + +func (p *peeker) includingNewlines() bool { + return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1] +} + +func (p *peeker) PushIncludeNewlines(include bool) { + if tracePeekerNewlinesStack { + // Record who called us so that we can more easily track down any + // mismanagement of the stack in the parser. + callers := []uintptr{0} + runtime.Callers(2, callers) + frames := runtime.CallersFrames(callers) + frame, _ := frames.Next() + p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{ + true, frame, include, + }) + } + + p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include) +} + +func (p *peeker) PopIncludeNewlines() bool { + stack := p.IncludeNewlinesStack + remain, ret := stack[:len(stack)-1], stack[len(stack)-1] + p.IncludeNewlinesStack = remain + + if tracePeekerNewlinesStack { + // Record who called us so that we can more easily track down any + // mismanagement of the stack in the parser. + callers := []uintptr{0} + runtime.Callers(2, callers) + frames := runtime.CallersFrames(callers) + frame, _ := frames.Next() + p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{ + false, frame, ret, + }) + } + + return ret +} + +// AssertEmptyNewlinesStack checks if the IncludeNewlinesStack is empty, doing +// panicking if it is not. This can be used to catch stack mismanagement that +// might otherwise just cause confusing downstream errors. +// +// This function is a no-op if the stack is empty when called. +// +// If newlines stack tracing is enabled by setting the global variable +// tracePeekerNewlinesStack at init time, a full log of all of the push/pop +// calls will be produced to help identify which caller in the parser is +// misbehaving. +func (p *peeker) AssertEmptyIncludeNewlinesStack() { + if len(p.IncludeNewlinesStack) != 1 { + // Should never happen; indicates mismanagement of the stack inside + // the parser. 
+ if p.newlineStackChanges != nil { // only if traceNewlinesStack is enabled above + panic(fmt.Errorf( + "non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s", + len(p.IncludeNewlinesStack)-1, + formatPeekerNewlineStackChanges(p.newlineStackChanges), + )) + } else { + panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack)) + } + } +} + +func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string { + indent := 0 + var buf bytes.Buffer + for _, change := range changes { + funcName := change.Frame.Function + if idx := strings.LastIndexByte(funcName, '.'); idx != -1 { + funcName = funcName[idx+1:] + } + filename := change.Frame.File + if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 { + filename = filename[idx+1:] + } + + switch change.Pushing { + + case true: + buf.WriteString(strings.Repeat(" ", indent)) + fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line) + indent++ + + case false: + indent-- + buf.WriteString(strings.Repeat(" ", indent)) + fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line) + + } + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go new file mode 100644 index 000000000..0b68efd60 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go @@ -0,0 +1,171 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" +) + +// ParseConfig parses the given buffer as a whole HCL config file, returning +// a *hcl.File representing its contents. If HasErrors called on the returned +// diagnostics returns true, the returned body is likely to be incomplete +// and should therefore be used with care. +// +// The body in the returned file has dynamic type *hclsyntax.Body, so callers +// may freely type-assert this to get access to the full hclsyntax API in +// situations where detailed access is required. However, most common use-cases +// should be served using the hcl.Body interface to ensure compatibility with +// other configurationg syntaxes, such as JSON. +func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) { + tokens, diags := LexConfig(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + body, parseDiags := parser.ParseBody(TokenEOF) + diags = append(diags, parseDiags...) + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return &hcl.File{ + Body: body, + Bytes: src, + + Nav: navigation{ + root: body, + }, + }, diags +} + +// ParseExpression parses the given buffer as a standalone HCL expression, +// returning it as an instance of Expression. +func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) { + tokens, diags := LexExpression(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + + // Bare expressions are always parsed in "ignore newlines" mode, as if + // they were wrapped in parentheses. + parser.PushIncludeNewlines(false) + + expr, parseDiags := parser.ParseExpression() + diags = append(diags, parseDiags...) 
+ + next := parser.Peek() + if next.Type != TokenEOF && !parser.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after expression", + Detail: "An expression was successfully parsed, but extra characters were found after it.", + Subject: &next.Range, + }) + } + + parser.PopIncludeNewlines() + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// ParseTemplate parses the given buffer as a standalone HCL template, +// returning it as an instance of Expression. +func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) { + tokens, diags := LexTemplate(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + expr, parseDiags := parser.ParseTemplate() + diags = append(diags, parseDiags...) + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// ParseTraversalAbs parses the given buffer as a standalone absolute traversal. +// +// Parsing as a traversal is more limited than parsing as an expession since +// it allows only attribute and indexing operations on variables. Traverals +// are useful as a syntax for referring to objects without necessarily +// evaluating them. +func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) { + tokens, diags := LexExpression(src, filename, start) + peeker := newPeeker(tokens, false) + parser := &parser{peeker: peeker} + + // Bare traverals are always parsed in "ignore newlines" mode, as if + // they were wrapped in parentheses. + parser.PushIncludeNewlines(false) + + expr, parseDiags := parser.ParseTraversalAbs() + diags = append(diags, parseDiags...) + + parser.PopIncludeNewlines() + + // Panic if the parser uses incorrect stack discipline with the peeker's + // newlines stack, since otherwise it will produce confusing downstream + // errors. + peeker.AssertEmptyIncludeNewlinesStack() + + return expr, diags +} + +// LexConfig performs lexical analysis on the given buffer, treating it as a +// whole HCL config file, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexExpression performs lexical analysis on the given buffer, treating it as +// a standalone HCL expression, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + // This is actually just the same thing as LexConfig, since configs + // and expressions lex in the same way. 
+ tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexTemplate performs lexical analysis on the given buffer, treating it as a +// standalone HCL template, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// ValidIdentifier tests if the given string could be a valid identifier in +// a native syntax expression. +// +// This is useful when accepting names from the user that will be used as +// variable or attribute names in the scope, to ensure that any name chosen +// will be traversable using the variable or attribute traversal syntax. +func ValidIdentifier(s string) bool { + // This is a kinda-expensive way to do something pretty simple, but it + // is easiest to do with our existing scanner-related infrastructure here + // and nobody should be validating identifiers in a tight loop. + tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) + return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go new file mode 100644 index 000000000..2895ade75 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go @@ -0,0 +1,301 @@ +//line scan_string_lit.rl:1 + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. 
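[Reviewer note, not part of this change: the public.go entry points vendored above are the surface the rest of this module consumes. The following is a minimal usage sketch, assuming a caller outside the vendor tree; the source strings, file names, and identifiers are illustrative only.]

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	src := []byte("name = \"example\"\n")

	// ParseConfig returns an *hcl.File whose Body has dynamic type *hclsyntax.Body,
	// so it can be type-asserted when detailed syntax access is needed.
	file, diags := hclsyntax.ParseConfig(src, "example.hcl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}
	body := file.Body.(*hclsyntax.Body)
	fmt.Println("attributes:", len(body.Attributes))

	// ParseTraversalAbs accepts only variable, attribute, and index operations,
	// e.g. an illustrative reference like workspace.example.name.
	trav, diags := hclsyntax.ParseTraversalAbs([]byte("workspace.example.name"), "ref", hcl.Pos{Line: 1, Column: 1, Byte: 0})
	if !diags.HasErrors() {
		fmt.Println("root:", trav.RootName(), "steps:", len(trav))
	}

	// ValidIdentifier reports whether a string could be used as a bare identifier.
	fmt.Println(hclsyntax.ValidIdentifier("workspace_name")) // true
	fmt.Println(hclsyntax.ValidIdentifier("not valid"))      // false
}

[The vendored diff resumes below.]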
+ +//line scan_string_lit.go:9 +var _hclstrtok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 2, 1, 0, +} + +var _hclstrtok_key_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 10, 14, 18, + 22, 27, 31, 36, 41, 46, 51, 57, + 62, 74, 85, 96, 107, 118, 129, 140, + 151, +} + +var _hclstrtok_trans_keys []byte = []byte{ + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 123, 10, 13, 36, 37, 10, + 13, 36, 37, 92, 10, 13, 36, 37, + 92, 10, 13, 36, 37, 92, 10, 13, + 36, 37, 92, 10, 13, 36, 37, 92, + 123, 10, 13, 36, 37, 92, 85, 117, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 92, 48, + 57, 65, 70, 97, 102, 10, 13, 36, + 37, 92, 48, 57, 65, 70, 97, 102, + 10, 13, 36, 37, 92, 48, 57, 65, + 70, 97, 102, 10, 13, 36, 37, 92, + 48, 57, 65, 70, 97, 102, 10, 13, + 36, 37, 92, 48, 57, 65, 70, 97, + 102, 10, 13, 36, 37, 92, 48, 57, + 65, 70, 97, 102, 10, 13, 36, 37, + 92, 48, 57, 65, 70, 97, 102, 10, + 13, 36, 37, 92, 48, 57, 65, 70, + 97, 102, +} + +var _hclstrtok_single_lengths []byte = []byte{ + 0, 0, 0, 0, 4, 4, 4, 4, + 5, 4, 5, 5, 5, 5, 6, 5, + 2, 5, 5, 5, 5, 5, 5, 5, + 5, +} + +var _hclstrtok_range_lengths []byte = []byte{ + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +var _hclstrtok_index_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 11, 16, 21, + 26, 32, 37, 43, 49, 55, 61, 68, + 74, 82, 91, 100, 109, 118, 127, 136, + 145, +} + +var _hclstrtok_indicies []byte = []byte{ + 0, 1, 2, 1, 3, 1, 5, 6, + 7, 8, 4, 10, 11, 12, 13, 9, + 14, 11, 12, 13, 9, 10, 11, 15, + 13, 9, 10, 11, 12, 13, 14, 9, + 10, 11, 12, 15, 9, 17, 18, 19, + 20, 21, 16, 23, 24, 25, 26, 27, + 22, 0, 24, 25, 26, 27, 22, 23, + 24, 28, 26, 27, 22, 23, 24, 25, + 26, 27, 0, 22, 23, 24, 25, 28, + 27, 22, 29, 30, 22, 2, 3, 31, + 22, 0, 23, 24, 25, 26, 27, 32, + 32, 32, 22, 23, 24, 25, 26, 27, + 33, 33, 33, 22, 23, 24, 25, 26, + 27, 34, 34, 34, 22, 23, 24, 25, + 26, 27, 30, 30, 30, 22, 23, 24, + 25, 26, 27, 35, 35, 35, 22, 23, + 24, 25, 26, 27, 36, 36, 36, 22, + 23, 24, 25, 26, 27, 37, 37, 37, + 22, 23, 24, 25, 26, 27, 0, 0, + 0, 22, +} + +var _hclstrtok_trans_targs []byte = []byte{ + 11, 0, 1, 2, 4, 5, 6, 7, + 9, 4, 5, 6, 7, 9, 5, 8, + 10, 11, 12, 13, 15, 16, 10, 11, + 12, 13, 15, 16, 14, 17, 21, 3, + 18, 19, 20, 22, 23, 24, +} + +var _hclstrtok_trans_actions []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 3, 5, 5, 5, 5, 0, 0, + 0, 1, 1, 1, 1, 1, 3, 5, + 5, 5, 5, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hclstrtok_eof_actions []byte = []byte{ + 0, 0, 0, 0, 0, 3, 3, 3, + 3, 3, 0, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +const hclstrtok_start int = 4 +const hclstrtok_first_final int = 4 +const hclstrtok_error int = 0 + +const hclstrtok_en_quoted int = 10 +const hclstrtok_en_unquoted int = 4 + +//line scan_string_lit.rl:10 + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + +//line scan_string_lit.rl:61 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + +//line scan_string_lit.go:154 + { + } + +//line scan_string_lit.go:158 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } 
+ _resume: + _keys = int(_hclstrtok_key_offsets[cs]) + _trans = int(_hclstrtok_index_offsets[cs]) + + _klen = int(_hclstrtok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hclstrtok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hclstrtok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hclstrtok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hclstrtok_indicies[_trans]) + cs = int(_hclstrtok_trans_targs[_trans]) + + if _hclstrtok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hclstrtok_trans_actions[_trans]) + _nacts = uint(_hclstrtok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hclstrtok_actions[_acts-1] { + case 0: +//line scan_string_lit.rl:40 + + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p + + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:253 + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _hclstrtok_eof_actions[cs] + __nacts := uint(_hclstrtok_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _hclstrtok_actions[__acts-1] { + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:278 + } + } + } + + _out: + { + } + } + +//line scan_string_lit.rl:89 + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl new file mode 100644 index 000000000..f8ac11751 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl @@ -0,0 +1,105 @@ + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_string_lit.rl here, so edit away!) + + machine hclstrtok; + write data; +}%% + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . 
UTF8Cont + ); + BadUTF8 = any - AnyUTF8; + + Hex = ('0'..'9' | 'a'..'f' | 'A'..'F'); + + # Our goal with this patterns is to capture user intent as best as + # possible, even if the input is invalid. The caller will then verify + # whether each token is valid and generate suitable error messages + # if not. + UnicodeEscapeShort = "\\u" . Hex{0,4}; + UnicodeEscapeLong = "\\U" . Hex{0,8}; + UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong); + SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?; + TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?); + Newline = ("\r\n" | "\r" | "\n"); + + action Begin { + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p; + } + action End { + te = p; + ret = append(ret, data[ts:te]); + } + + QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End; + UnquotedToken = (TemplateEscape | Newline) >Begin %End; + QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n")); + UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n")); + + quoted := (QuotedToken | QuotedLiteral)**; + unquoted := (UnquotedToken | UnquotedLiteral)**; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + + %%{ + write init nocs; + write exec; + }%% + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go new file mode 100644 index 000000000..794123a85 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go @@ -0,0 +1,5265 @@ +//line scan_tokens.rl:1 + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl/v2" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. 
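[Reviewer note, not part of this change: the generated scanStringLit above splits the raw bytes of a string literal into alternating runs of plain characters and escape/template-introducer sequences, leaving validation of each escape to the token parser. A minimal sketch of how it could be exercised; since scanStringLit is unexported, such a sketch would have to live in a hypothetical _test.go file inside the hclsyntax package.]

package hclsyntax

import "testing"

// Hypothetical illustration only; not part of the vendored upstream sources.
func TestScanStringLitSketch(t *testing.T) {
	// quoted=true selects the tokenizer used for "..." literals, where backslash
	// escapes are meaningful in addition to the ${ and %{ template introducers.
	chunks := scanStringLit([]byte(`hello\nworld`), true)
	for i, chunk := range chunks {
		// Expect the literal runs and the \n escape to come back as separate chunks.
		t.Logf("chunk %d: %q", i, chunk)
	}
}

[The vendored diff resumes below.]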
+ +//line scan_tokens.go:15 +var _hcltok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 3, 1, 4, + 1, 7, 1, 8, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 23, 1, 24, + 1, 25, 1, 26, 1, 27, 1, 28, + 1, 29, 1, 30, 1, 31, 1, 32, + 1, 35, 1, 36, 1, 37, 1, 38, + 1, 39, 1, 40, 1, 41, 1, 42, + 1, 43, 1, 44, 1, 47, 1, 48, + 1, 49, 1, 50, 1, 51, 1, 52, + 1, 53, 1, 56, 1, 57, 1, 58, + 1, 59, 1, 60, 1, 61, 1, 62, + 1, 63, 1, 64, 1, 65, 1, 66, + 1, 67, 1, 68, 1, 69, 1, 70, + 1, 71, 1, 72, 1, 73, 1, 74, + 1, 75, 1, 76, 1, 77, 1, 78, + 1, 79, 1, 80, 1, 81, 1, 82, + 1, 83, 1, 84, 1, 85, 2, 0, + 14, 2, 0, 25, 2, 0, 29, 2, + 0, 37, 2, 0, 41, 2, 1, 2, + 2, 4, 5, 2, 4, 6, 2, 4, + 21, 2, 4, 22, 2, 4, 33, 2, + 4, 34, 2, 4, 45, 2, 4, 46, + 2, 4, 54, 2, 4, 55, +} + +var _hcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 4, 9, 13, 15, + 57, 98, 144, 145, 149, 155, 155, 157, + 159, 168, 174, 181, 182, 185, 186, 190, + 195, 204, 208, 212, 220, 222, 224, 226, + 229, 261, 263, 265, 269, 273, 276, 287, + 300, 319, 332, 348, 360, 376, 391, 412, + 422, 434, 445, 459, 474, 484, 496, 505, + 517, 519, 523, 544, 553, 563, 569, 575, + 576, 625, 627, 631, 633, 639, 646, 654, + 661, 664, 670, 674, 678, 680, 684, 688, + 692, 698, 706, 714, 720, 722, 726, 728, + 734, 738, 742, 746, 750, 755, 762, 768, + 770, 772, 776, 778, 784, 788, 792, 802, + 807, 821, 836, 838, 846, 848, 853, 867, + 872, 874, 878, 879, 883, 889, 895, 905, + 915, 926, 934, 937, 940, 944, 948, 950, + 953, 953, 956, 958, 988, 990, 992, 996, + 1001, 1005, 1010, 1012, 1014, 1016, 1025, 1029, + 1033, 1039, 1041, 1049, 1057, 1069, 1072, 1078, + 1082, 1084, 1088, 1108, 1110, 1112, 1123, 1129, + 1131, 1133, 1135, 1139, 1145, 1151, 1153, 1158, + 1162, 1164, 1172, 1190, 1230, 1240, 1244, 1246, + 1248, 1249, 1253, 1257, 1261, 1265, 1269, 1274, + 1278, 1282, 1286, 1288, 1290, 1294, 1304, 1308, + 1310, 1314, 1318, 1322, 1335, 1337, 1339, 1343, + 1345, 1349, 1351, 1353, 1383, 1387, 1391, 1395, + 1398, 1405, 1410, 1421, 1425, 1441, 1455, 1459, + 1464, 1468, 1472, 1478, 1480, 1486, 1488, 1492, + 1494, 1500, 1505, 1510, 1520, 1522, 1524, 1528, + 1532, 1534, 1547, 1549, 1553, 1557, 1565, 1567, + 1571, 1573, 1574, 1577, 1582, 1584, 1586, 1590, + 1592, 1596, 1602, 1622, 1628, 1634, 1636, 1637, + 1647, 1648, 1656, 1663, 1665, 1668, 1670, 1672, + 1674, 1679, 1683, 1687, 1692, 1702, 1712, 1716, + 1720, 1734, 1760, 1770, 1772, 1774, 1777, 1779, + 1782, 1784, 1788, 1790, 1791, 1795, 1797, 1800, + 1807, 1815, 1817, 1819, 1823, 1825, 1831, 1842, + 1845, 1847, 1851, 1856, 1886, 1891, 1893, 1896, + 1901, 1915, 1922, 1936, 1941, 1954, 1958, 1971, + 1976, 1994, 1995, 2004, 2008, 2020, 2025, 2032, + 2039, 2046, 2048, 2052, 2074, 2079, 2080, 2084, + 2086, 2136, 2139, 2150, 2154, 2156, 2162, 2168, + 2170, 2175, 2177, 2181, 2183, 2184, 2186, 2188, + 2194, 2196, 2198, 2202, 2208, 2221, 2223, 2229, + 2233, 2241, 2252, 2260, 2263, 2293, 2299, 2302, + 2307, 2309, 2313, 2317, 2321, 2323, 2330, 2332, + 2341, 2348, 2356, 2358, 2378, 2390, 2394, 2396, + 2414, 2453, 2455, 2459, 2461, 2468, 2472, 2500, + 2502, 2504, 2506, 2508, 2511, 2513, 2517, 2521, + 2523, 2526, 2528, 2530, 2533, 2535, 2537, 2538, + 2540, 2542, 2546, 2550, 2553, 2566, 2568, 2574, + 2578, 2580, 2584, 2588, 2602, 2605, 2614, 2616, + 2620, 2626, 2626, 2628, 2630, 2639, 2645, 2652, + 2653, 2656, 2657, 2661, 2666, 2675, 2679, 2683, + 2691, 2693, 2695, 2697, 2700, 2732, 2734, 2736, + 2740, 2744, 2747, 2758, 2771, 2790, 2803, 2819, + 2831, 2847, 2862, 2883, 2893, 2905, 2916, 2930, + 2945, 2955, 
2967, 2976, 2988, 2990, 2994, 3015, + 3024, 3034, 3040, 3046, 3047, 3096, 3098, 3102, + 3104, 3110, 3117, 3125, 3132, 3135, 3141, 3145, + 3149, 3151, 3155, 3159, 3163, 3169, 3177, 3185, + 3191, 3193, 3197, 3199, 3205, 3209, 3213, 3217, + 3221, 3226, 3233, 3239, 3241, 3243, 3247, 3249, + 3255, 3259, 3263, 3273, 3278, 3292, 3307, 3309, + 3317, 3319, 3324, 3338, 3343, 3345, 3349, 3350, + 3354, 3360, 3366, 3376, 3386, 3397, 3405, 3408, + 3411, 3415, 3419, 3421, 3424, 3424, 3427, 3429, + 3459, 3461, 3463, 3467, 3472, 3476, 3481, 3483, + 3485, 3487, 3496, 3500, 3504, 3510, 3512, 3520, + 3528, 3540, 3543, 3549, 3553, 3555, 3559, 3579, + 3581, 3583, 3594, 3600, 3602, 3604, 3606, 3610, + 3616, 3622, 3624, 3629, 3633, 3635, 3643, 3661, + 3701, 3711, 3715, 3717, 3719, 3720, 3724, 3728, + 3732, 3736, 3740, 3745, 3749, 3753, 3757, 3759, + 3761, 3765, 3775, 3779, 3781, 3785, 3789, 3793, + 3806, 3808, 3810, 3814, 3816, 3820, 3822, 3824, + 3854, 3858, 3862, 3866, 3869, 3876, 3881, 3892, + 3896, 3912, 3926, 3930, 3935, 3939, 3943, 3949, + 3951, 3957, 3959, 3963, 3965, 3971, 3976, 3981, + 3991, 3993, 3995, 3999, 4003, 4005, 4018, 4020, + 4024, 4028, 4036, 4038, 4042, 4044, 4045, 4048, + 4053, 4055, 4057, 4061, 4063, 4067, 4073, 4093, + 4099, 4105, 4107, 4108, 4118, 4119, 4127, 4134, + 4136, 4139, 4141, 4143, 4145, 4150, 4154, 4158, + 4163, 4173, 4183, 4187, 4191, 4205, 4231, 4241, + 4243, 4245, 4248, 4250, 4253, 4255, 4259, 4261, + 4262, 4266, 4268, 4270, 4277, 4281, 4288, 4295, + 4304, 4320, 4332, 4350, 4361, 4373, 4381, 4399, + 4407, 4437, 4440, 4450, 4460, 4472, 4483, 4492, + 4505, 4517, 4521, 4527, 4554, 4563, 4566, 4571, + 4577, 4582, 4603, 4607, 4613, 4613, 4620, 4629, + 4637, 4640, 4644, 4650, 4656, 4659, 4663, 4670, + 4676, 4685, 4694, 4698, 4702, 4706, 4710, 4717, + 4721, 4725, 4735, 4741, 4745, 4751, 4755, 4758, + 4764, 4770, 4782, 4786, 4790, 4800, 4804, 4815, + 4817, 4819, 4823, 4835, 4840, 4864, 4868, 4874, + 4896, 4905, 4909, 4912, 4913, 4921, 4929, 4935, + 4945, 4952, 4970, 4973, 4976, 4984, 4990, 4994, + 4998, 5002, 5008, 5016, 5021, 5027, 5031, 5039, + 5046, 5050, 5057, 5063, 5071, 5079, 5085, 5091, + 5102, 5106, 5118, 5127, 5144, 5161, 5164, 5168, + 5170, 5176, 5178, 5182, 5197, 5201, 5205, 5209, + 5213, 5217, 5219, 5225, 5230, 5234, 5240, 5247, + 5250, 5268, 5270, 5315, 5321, 5327, 5331, 5335, + 5341, 5345, 5351, 5357, 5364, 5366, 5372, 5378, + 5382, 5386, 5394, 5407, 5413, 5420, 5428, 5434, + 5443, 5449, 5453, 5458, 5462, 5470, 5474, 5478, + 5508, 5514, 5520, 5526, 5532, 5539, 5545, 5552, + 5557, 5567, 5571, 5578, 5584, 5588, 5595, 5599, + 5605, 5608, 5612, 5616, 5620, 5624, 5629, 5634, + 5638, 5649, 5653, 5657, 5663, 5671, 5675, 5692, + 5696, 5702, 5712, 5718, 5724, 5727, 5732, 5741, + 5745, 5749, 5755, 5759, 5765, 5773, 5791, 5792, + 5802, 5803, 5812, 5820, 5822, 5825, 5827, 5829, + 5831, 5836, 5849, 5853, 5868, 5897, 5908, 5910, + 5914, 5918, 5923, 5927, 5929, 5936, 5940, 5948, + 5952, 5964, 5966, 5968, 5970, 5972, 5974, 5975, + 5977, 5979, 5981, 5983, 5985, 5986, 5988, 5990, + 5992, 5994, 5996, 6000, 6006, 6006, 6008, 6010, + 6019, 6025, 6032, 6033, 6036, 6037, 6041, 6046, + 6055, 6059, 6063, 6071, 6073, 6075, 6077, 6080, + 6112, 6114, 6116, 6120, 6124, 6127, 6138, 6151, + 6170, 6183, 6199, 6211, 6227, 6242, 6263, 6273, + 6285, 6296, 6310, 6325, 6335, 6347, 6356, 6368, + 6370, 6374, 6395, 6404, 6414, 6420, 6426, 6427, + 6476, 6478, 6482, 6484, 6490, 6497, 6505, 6512, + 6515, 6521, 6525, 6529, 6531, 6535, 6539, 6543, + 6549, 6557, 6565, 6571, 6573, 6577, 6579, 6585, + 6589, 6593, 
6597, 6601, 6606, 6613, 6619, 6621, + 6623, 6627, 6629, 6635, 6639, 6643, 6653, 6658, + 6672, 6687, 6689, 6697, 6699, 6704, 6718, 6723, + 6725, 6729, 6730, 6734, 6740, 6746, 6756, 6766, + 6777, 6785, 6788, 6791, 6795, 6799, 6801, 6804, + 6804, 6807, 6809, 6839, 6841, 6843, 6847, 6852, + 6856, 6861, 6863, 6865, 6867, 6876, 6880, 6884, + 6890, 6892, 6900, 6908, 6920, 6923, 6929, 6933, + 6935, 6939, 6959, 6961, 6963, 6974, 6980, 6982, + 6984, 6986, 6990, 6996, 7002, 7004, 7009, 7013, + 7015, 7023, 7041, 7081, 7091, 7095, 7097, 7099, + 7100, 7104, 7108, 7112, 7116, 7120, 7125, 7129, + 7133, 7137, 7139, 7141, 7145, 7155, 7159, 7161, + 7165, 7169, 7173, 7186, 7188, 7190, 7194, 7196, + 7200, 7202, 7204, 7234, 7238, 7242, 7246, 7249, + 7256, 7261, 7272, 7276, 7292, 7306, 7310, 7315, + 7319, 7323, 7329, 7331, 7337, 7339, 7343, 7345, + 7351, 7356, 7361, 7371, 7373, 7375, 7379, 7383, + 7385, 7398, 7400, 7404, 7408, 7416, 7418, 7422, + 7424, 7425, 7428, 7433, 7435, 7437, 7441, 7443, + 7447, 7453, 7473, 7479, 7485, 7487, 7488, 7498, + 7499, 7507, 7514, 7516, 7519, 7521, 7523, 7525, + 7530, 7534, 7538, 7543, 7553, 7563, 7567, 7571, + 7585, 7611, 7621, 7623, 7625, 7628, 7630, 7633, + 7635, 7639, 7641, 7642, 7646, 7648, 7650, 7657, + 7661, 7668, 7675, 7684, 7700, 7712, 7730, 7741, + 7753, 7761, 7779, 7787, 7817, 7820, 7830, 7840, + 7852, 7863, 7872, 7885, 7897, 7901, 7907, 7934, + 7943, 7946, 7951, 7957, 7962, 7983, 7987, 7993, + 7993, 8000, 8009, 8017, 8020, 8024, 8030, 8036, + 8039, 8043, 8050, 8056, 8065, 8074, 8078, 8082, + 8086, 8090, 8097, 8101, 8105, 8115, 8121, 8125, + 8131, 8135, 8138, 8144, 8150, 8162, 8166, 8170, + 8180, 8184, 8195, 8197, 8199, 8203, 8215, 8220, + 8244, 8248, 8254, 8276, 8285, 8289, 8292, 8293, + 8301, 8309, 8315, 8325, 8332, 8350, 8353, 8356, + 8364, 8370, 8374, 8378, 8382, 8388, 8396, 8401, + 8407, 8411, 8419, 8426, 8430, 8437, 8443, 8451, + 8459, 8465, 8471, 8482, 8486, 8498, 8507, 8524, + 8541, 8544, 8548, 8550, 8556, 8558, 8562, 8577, + 8581, 8585, 8589, 8593, 8597, 8599, 8605, 8610, + 8614, 8620, 8627, 8630, 8648, 8650, 8695, 8701, + 8707, 8711, 8715, 8721, 8725, 8731, 8737, 8744, + 8746, 8752, 8758, 8762, 8766, 8774, 8787, 8793, + 8800, 8808, 8814, 8823, 8829, 8833, 8838, 8842, + 8850, 8854, 8858, 8888, 8894, 8900, 8906, 8912, + 8919, 8925, 8932, 8937, 8947, 8951, 8958, 8964, + 8968, 8975, 8979, 8985, 8988, 8992, 8996, 9000, + 9004, 9009, 9014, 9018, 9029, 9033, 9037, 9043, + 9051, 9055, 9072, 9076, 9082, 9092, 9098, 9104, + 9107, 9112, 9121, 9125, 9129, 9135, 9139, 9145, + 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202, + 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248, + 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309, + 9316, 9320, 9328, 9332, 9407, 9409, 9410, 9411, + 9412, 9413, 9414, 9416, 9421, 9423, 9425, 9426, + 9470, 9471, 9472, 9474, 9479, 9483, 9483, 9485, + 9487, 9498, 9508, 9516, 9517, 9519, 9520, 9524, + 9528, 9538, 9542, 9549, 9560, 9567, 9571, 9577, + 9588, 9620, 9669, 9684, 9699, 9704, 9706, 9711, + 9743, 9751, 9753, 9775, 9797, 9799, 9815, 9831, + 9833, 9835, 9835, 9836, 9837, 9838, 9840, 9841, + 9853, 9855, 9857, 9859, 9873, 9887, 9889, 9892, + 9895, 9897, 9898, 9899, 9901, 9903, 9905, 9919, + 9933, 9935, 9938, 9941, 9943, 9944, 9945, 9947, + 9949, 9951, 10000, 10044, 10046, 10051, 10055, 10055, + 10057, 10059, 10070, 10080, 10088, 10089, 10091, 10092, + 10096, 10100, 10110, 10114, 10121, 10132, 10139, 10143, + 10149, 10160, 10192, 10241, 10256, 10271, 10276, 10278, + 10283, 10315, 10323, 10325, 10347, 10369, +} + +var _hcltok_trans_keys []byte = 
[]byte{ + 46, 42, 42, 47, 46, 69, 101, 48, + 57, 43, 45, 48, 57, 48, 57, 45, + 95, 194, 195, 198, 199, 203, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 239, 240, 65, + 90, 97, 122, 196, 202, 208, 218, 229, + 236, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 10, 13, 45, 95, 194, 195, + 198, 199, 203, 204, 205, 206, 207, 210, + 212, 213, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, + 233, 234, 237, 239, 240, 243, 48, 57, + 65, 90, 97, 122, 196, 218, 229, 236, + 10, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 
174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 
135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 170, 181, 186, + 191, 176, 180, 182, 183, 186, 189, 134, + 140, 136, 138, 142, 161, 163, 255, 130, + 137, 136, 255, 144, 170, 176, 178, 160, + 191, 128, 138, 174, 175, 177, 255, 148, + 150, 164, 167, 173, 176, 185, 189, 190, + 192, 255, 144, 146, 175, 141, 255, 166, + 176, 178, 255, 186, 138, 170, 180, 181, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 184, 186, + 187, 188, 189, 190, 183, 185, 154, 164, + 168, 128, 149, 128, 152, 189, 132, 185, + 144, 152, 161, 177, 255, 169, 177, 129, + 132, 141, 142, 145, 146, 179, 181, 186, + 188, 190, 255, 142, 156, 157, 159, 161, + 176, 177, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 158, 153, 156, 178, 180, 189, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 144, 185, 160, 161, 189, 133, + 140, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 185, 177, 156, 157, 159, 161, + 131, 156, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 144, 189, 133, 140, 142, 144, + 146, 168, 170, 185, 152, 154, 160, 161, + 128, 189, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 158, 160, 161, 177, + 178, 189, 133, 140, 142, 144, 146, 186, + 142, 148, 150, 
159, 161, 186, 191, 189, + 133, 150, 154, 177, 179, 187, 128, 134, + 129, 176, 178, 179, 132, 138, 141, 165, + 167, 189, 129, 130, 135, 136, 148, 151, + 153, 159, 161, 163, 170, 171, 173, 176, + 178, 179, 134, 128, 132, 156, 159, 128, + 128, 135, 137, 172, 136, 140, 128, 129, + 130, 131, 137, 138, 139, 140, 141, 142, + 143, 144, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 188, 189, 190, 191, 132, 152, 185, 187, + 191, 128, 170, 161, 144, 149, 154, 157, + 165, 166, 174, 176, 181, 255, 130, 141, + 143, 159, 155, 255, 128, 140, 142, 145, + 160, 177, 128, 145, 160, 172, 174, 176, + 151, 156, 170, 128, 168, 176, 255, 138, + 255, 128, 150, 160, 255, 149, 255, 167, + 133, 179, 133, 139, 131, 160, 174, 175, + 186, 255, 166, 255, 128, 163, 141, 143, + 154, 189, 169, 172, 174, 177, 181, 182, + 129, 130, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 177, 191, 165, + 170, 175, 177, 180, 255, 168, 174, 176, + 255, 128, 134, 136, 142, 144, 150, 152, + 158, 128, 129, 130, 131, 132, 133, 134, + 135, 144, 145, 255, 133, 135, 161, 169, + 177, 181, 184, 188, 160, 151, 154, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 143, 160, + 169, 172, 255, 191, 128, 174, 130, 134, + 139, 163, 255, 130, 179, 187, 189, 178, + 183, 138, 165, 176, 255, 135, 159, 189, + 255, 132, 178, 143, 160, 164, 166, 175, + 186, 190, 128, 168, 186, 128, 130, 132, + 139, 160, 182, 190, 255, 176, 178, 180, + 183, 184, 190, 255, 128, 130, 155, 157, + 160, 170, 178, 180, 128, 162, 164, 169, + 171, 172, 173, 174, 175, 180, 181, 182, + 183, 185, 186, 187, 188, 189, 190, 191, + 165, 179, 157, 190, 128, 134, 147, 151, + 159, 168, 170, 182, 184, 188, 176, 180, + 182, 255, 161, 186, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 145, 255, 139, + 143, 182, 255, 158, 175, 128, 144, 147, + 149, 151, 153, 179, 128, 135, 137, 164, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 131, 183, 131, 175, + 144, 168, 131, 166, 182, 144, 178, 131, + 178, 154, 156, 129, 132, 128, 145, 147, + 171, 159, 255, 144, 157, 161, 135, 138, + 128, 175, 135, 132, 133, 128, 174, 152, + 155, 132, 128, 170, 128, 153, 160, 190, + 192, 255, 128, 136, 138, 174, 128, 178, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 144, 173, + 128, 131, 163, 183, 189, 255, 133, 143, + 145, 255, 147, 159, 128, 176, 177, 178, + 128, 136, 144, 153, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 150, 153, 131, 140, 255, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 133, 255, + 170, 181, 183, 186, 128, 150, 152, 182, + 184, 255, 192, 255, 128, 255, 173, 130, + 133, 146, 159, 165, 171, 175, 255, 181, + 190, 184, 185, 192, 255, 140, 134, 138, + 142, 161, 163, 255, 182, 130, 136, 137, + 176, 151, 152, 154, 160, 190, 136, 144, + 192, 255, 135, 129, 130, 132, 133, 144, + 170, 176, 178, 144, 154, 160, 191, 128, + 169, 174, 255, 148, 169, 157, 158, 189, + 190, 192, 255, 144, 255, 139, 140, 178, + 255, 186, 128, 181, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 
172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 128, 173, 128, 155, + 160, 180, 182, 189, 148, 161, 163, 255, + 176, 164, 165, 132, 169, 177, 141, 142, + 145, 146, 179, 181, 186, 187, 158, 133, + 134, 137, 138, 143, 150, 152, 155, 164, + 165, 178, 255, 188, 129, 131, 133, 138, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 182, 184, 185, 190, 255, 157, 131, + 134, 137, 138, 142, 144, 146, 152, 159, + 165, 182, 255, 129, 131, 133, 141, 143, + 145, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 255, 134, 138, 142, 143, 145, + 159, 164, 165, 176, 184, 186, 255, 129, + 131, 133, 140, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 191, 177, + 128, 132, 135, 136, 139, 141, 150, 151, + 156, 157, 159, 163, 166, 175, 156, 130, + 131, 133, 138, 142, 144, 146, 149, 153, + 154, 158, 159, 163, 164, 168, 170, 174, + 185, 190, 191, 144, 151, 128, 130, 134, + 136, 138, 141, 166, 175, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 185, 189, + 255, 133, 137, 151, 142, 148, 155, 159, + 164, 165, 176, 255, 128, 131, 133, 140, + 142, 144, 146, 168, 170, 179, 181, 185, + 188, 191, 158, 128, 132, 134, 136, 138, + 141, 149, 150, 160, 163, 166, 175, 177, + 178, 129, 131, 133, 140, 142, 144, 146, + 186, 189, 255, 133, 137, 143, 147, 152, + 158, 164, 165, 176, 185, 192, 255, 189, + 130, 131, 133, 150, 154, 177, 179, 187, + 138, 150, 128, 134, 143, 148, 152, 159, + 166, 175, 178, 179, 129, 186, 128, 142, + 144, 153, 132, 138, 141, 165, 167, 129, + 130, 135, 136, 148, 151, 153, 159, 161, + 163, 170, 171, 173, 185, 187, 189, 134, + 128, 132, 136, 141, 144, 153, 156, 159, + 128, 181, 183, 185, 152, 153, 160, 169, + 190, 191, 128, 135, 137, 172, 177, 191, + 128, 132, 134, 151, 153, 188, 134, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 173, 175, 176, + 177, 178, 179, 181, 182, 183, 188, 189, + 190, 191, 132, 152, 172, 184, 185, 187, + 128, 191, 128, 137, 144, 255, 158, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 142, + 143, 182, 183, 191, 255, 128, 130, 133, + 136, 150, 152, 255, 145, 150, 151, 155, + 156, 160, 168, 178, 255, 128, 143, 160, + 255, 182, 183, 190, 255, 129, 255, 173, + 174, 192, 255, 129, 154, 160, 255, 171, + 173, 185, 255, 128, 140, 142, 148, 160, + 180, 128, 147, 160, 172, 174, 176, 178, + 179, 148, 150, 152, 155, 158, 159, 170, + 255, 139, 141, 144, 153, 160, 255, 184, + 255, 128, 170, 176, 255, 182, 255, 128, + 158, 160, 171, 176, 187, 134, 173, 176, + 180, 128, 171, 176, 255, 138, 143, 155, + 255, 128, 155, 160, 255, 159, 189, 190, + 192, 255, 167, 128, 137, 144, 153, 176, + 189, 140, 143, 154, 170, 180, 255, 180, + 255, 128, 183, 128, 137, 141, 189, 128, + 136, 144, 146, 148, 182, 184, 185, 128, + 181, 187, 191, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 255, 190, 128, 180, 182, 188, 130, 132, + 134, 140, 144, 147, 150, 155, 160, 172, + 178, 180, 182, 188, 128, 129, 130, 131, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 191, 255, 129, 147, 149, + 176, 178, 190, 192, 255, 144, 156, 161, + 144, 156, 165, 176, 130, 135, 149, 164, + 166, 168, 138, 147, 152, 157, 170, 185, + 188, 191, 142, 133, 137, 160, 255, 137, + 255, 128, 174, 176, 255, 159, 165, 170, + 180, 255, 167, 173, 128, 165, 176, 255, + 168, 174, 176, 190, 192, 255, 128, 150, + 160, 166, 168, 174, 176, 182, 184, 190, + 128, 134, 136, 142, 144, 150, 152, 158, + 160, 191, 128, 129, 130, 
131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 175, 177, 181, 184, 188, 160, 151, 152, + 187, 192, 255, 133, 173, 177, 255, 143, + 159, 187, 255, 176, 191, 182, 183, 184, + 191, 192, 255, 150, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 255, 141, 255, 144, 189, 141, + 143, 172, 255, 191, 128, 175, 180, 189, + 151, 159, 162, 255, 175, 137, 138, 184, + 255, 183, 255, 168, 255, 128, 179, 188, + 134, 143, 154, 159, 184, 186, 190, 255, + 128, 173, 176, 255, 148, 159, 189, 255, + 129, 142, 154, 159, 191, 255, 128, 182, + 128, 141, 144, 153, 160, 182, 186, 255, + 128, 130, 155, 157, 160, 175, 178, 182, + 129, 134, 137, 142, 145, 150, 160, 166, + 168, 174, 176, 255, 155, 166, 175, 128, + 170, 172, 173, 176, 185, 158, 159, 160, + 255, 164, 175, 135, 138, 188, 255, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 165, 186, 174, 175, 154, 255, 190, + 128, 134, 147, 151, 157, 168, 170, 182, + 184, 188, 128, 129, 131, 132, 134, 255, + 147, 255, 190, 255, 144, 145, 136, 175, + 188, 255, 128, 143, 160, 175, 179, 180, + 141, 143, 176, 180, 182, 255, 189, 255, + 191, 144, 153, 161, 186, 129, 154, 166, + 255, 191, 255, 130, 135, 138, 143, 146, + 151, 154, 156, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 161, 169, 128, 129, 130, + 131, 133, 135, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 128, 139, 141, + 166, 168, 186, 188, 189, 191, 255, 142, + 143, 158, 255, 187, 255, 128, 180, 189, + 128, 156, 160, 255, 145, 159, 161, 255, + 128, 159, 176, 255, 139, 143, 187, 255, + 128, 157, 160, 255, 144, 132, 135, 150, + 255, 158, 159, 170, 175, 148, 151, 188, + 255, 128, 167, 176, 255, 164, 255, 183, + 255, 128, 149, 160, 167, 136, 188, 128, + 133, 138, 181, 183, 184, 191, 255, 150, + 159, 183, 255, 128, 158, 160, 178, 180, + 181, 128, 149, 160, 185, 128, 183, 190, + 191, 191, 128, 131, 133, 134, 140, 147, + 149, 151, 153, 179, 184, 186, 160, 188, + 128, 156, 128, 135, 137, 166, 128, 181, + 128, 149, 160, 178, 128, 145, 128, 178, + 129, 130, 131, 132, 133, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 155, 156, 162, 163, + 171, 176, 177, 178, 128, 134, 135, 165, + 176, 190, 144, 168, 176, 185, 128, 180, + 182, 191, 182, 144, 179, 155, 133, 137, + 141, 143, 157, 255, 190, 128, 145, 147, + 183, 136, 128, 134, 138, 141, 143, 157, + 159, 168, 176, 255, 171, 175, 186, 255, + 128, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 144, 151, 128, 132, 135, 136, 139, 141, + 157, 163, 166, 172, 176, 180, 128, 138, + 144, 153, 134, 136, 143, 154, 255, 128, + 181, 184, 255, 129, 151, 158, 255, 129, + 131, 133, 143, 154, 255, 128, 137, 128, + 153, 157, 171, 176, 185, 160, 255, 170, + 190, 192, 255, 128, 184, 128, 136, 138, + 182, 184, 191, 128, 144, 153, 178, 255, + 168, 144, 145, 183, 255, 128, 142, 145, + 149, 129, 141, 144, 146, 147, 148, 175, + 255, 132, 255, 128, 144, 129, 143, 144, + 153, 145, 152, 135, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 161, 167, 185, 255, 128, 158, 160, 169, + 144, 173, 176, 180, 128, 131, 144, 153, + 163, 183, 189, 255, 144, 255, 133, 143, + 191, 255, 143, 159, 160, 128, 129, 255, + 159, 160, 171, 172, 255, 173, 255, 179, + 255, 128, 176, 177, 178, 128, 129, 171, + 175, 189, 
255, 128, 136, 144, 153, 157, + 158, 133, 134, 137, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 168, 169, 170, 150, 153, 165, 169, 173, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 139, 140, 191, 255, 134, + 128, 132, 138, 144, 146, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 141, 192, 255, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 160, 163, 164, 165, + 184, 185, 186, 161, 162, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 133, + 143, 151, 255, 139, 143, 154, 255, 164, + 167, 185, 187, 128, 131, 133, 159, 161, + 162, 169, 178, 180, 183, 130, 135, 137, + 139, 148, 151, 153, 155, 157, 159, 164, + 190, 141, 143, 145, 146, 161, 162, 167, + 170, 172, 178, 180, 183, 185, 188, 128, + 137, 139, 155, 161, 163, 165, 169, 171, + 187, 155, 156, 151, 255, 156, 157, 160, + 181, 255, 186, 187, 255, 162, 255, 160, + 168, 161, 167, 158, 255, 160, 132, 135, + 133, 134, 176, 255, 128, 191, 154, 164, + 168, 128, 149, 150, 191, 128, 152, 153, + 191, 181, 128, 159, 160, 189, 190, 191, + 189, 128, 131, 132, 185, 186, 191, 144, + 128, 151, 152, 161, 162, 176, 177, 255, + 169, 177, 129, 132, 141, 142, 145, 146, + 179, 181, 186, 188, 190, 191, 192, 255, + 142, 158, 128, 155, 156, 161, 162, 175, + 176, 177, 178, 191, 169, 177, 180, 183, + 128, 132, 133, 138, 139, 142, 143, 144, + 145, 146, 147, 185, 186, 191, 157, 128, + 152, 153, 158, 159, 177, 178, 180, 181, + 191, 142, 146, 169, 177, 180, 189, 128, + 132, 133, 185, 186, 191, 144, 185, 128, + 159, 160, 161, 162, 191, 169, 177, 180, + 189, 128, 132, 133, 140, 141, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 158, + 177, 128, 155, 156, 161, 162, 191, 131, + 145, 155, 157, 128, 132, 133, 138, 139, + 141, 142, 149, 150, 152, 153, 159, 160, + 162, 163, 164, 165, 167, 168, 170, 171, + 173, 174, 185, 186, 191, 144, 128, 191, + 141, 145, 169, 189, 128, 132, 133, 185, + 186, 191, 128, 151, 152, 154, 155, 159, + 160, 161, 162, 191, 128, 141, 145, 169, + 180, 189, 129, 132, 133, 185, 186, 191, + 158, 128, 159, 160, 161, 162, 176, 177, + 178, 179, 191, 141, 145, 189, 128, 132, + 133, 186, 187, 191, 142, 128, 147, 148, + 150, 151, 158, 159, 161, 162, 185, 186, + 191, 178, 188, 128, 132, 133, 150, 151, + 153, 154, 189, 190, 191, 128, 134, 135, + 191, 128, 177, 129, 179, 180, 191, 128, + 131, 137, 141, 152, 160, 164, 166, 172, + 177, 189, 129, 132, 133, 134, 135, 138, + 139, 147, 148, 167, 168, 169, 170, 179, + 180, 191, 133, 128, 134, 135, 155, 156, + 159, 160, 191, 128, 129, 191, 136, 128, + 172, 173, 191, 128, 135, 136, 140, 141, + 191, 191, 128, 170, 171, 190, 161, 128, + 143, 144, 149, 150, 153, 154, 157, 158, + 164, 165, 166, 167, 173, 174, 176, 177, + 180, 181, 255, 130, 141, 143, 159, 134, + 187, 136, 140, 142, 143, 137, 151, 153, + 142, 143, 158, 159, 137, 177, 191, 142, + 143, 182, 183, 192, 255, 129, 151, 128, + 133, 134, 135, 136, 255, 145, 150, 151, + 155, 191, 192, 255, 128, 143, 144, 159, + 160, 255, 182, 183, 190, 191, 192, 255, + 128, 129, 255, 173, 174, 192, 255, 128, + 129, 154, 155, 159, 160, 255, 171, 173, + 185, 191, 192, 255, 141, 128, 145, 146, + 159, 160, 177, 178, 191, 173, 128, 145, + 146, 159, 160, 176, 177, 191, 128, 179, + 180, 191, 151, 156, 128, 191, 128, 159, + 160, 255, 184, 191, 192, 255, 169, 128, + 170, 171, 175, 176, 255, 182, 191, 192, + 255, 128, 158, 159, 191, 128, 143, 144, + 173, 174, 175, 176, 180, 181, 191, 128, + 171, 172, 175, 176, 255, 138, 191, 
192, + 255, 128, 150, 151, 159, 160, 255, 149, + 191, 192, 255, 167, 128, 191, 128, 132, + 133, 179, 180, 191, 128, 132, 133, 139, + 140, 191, 128, 130, 131, 160, 161, 173, + 174, 175, 176, 185, 186, 255, 166, 191, + 192, 255, 128, 163, 164, 191, 128, 140, + 141, 143, 144, 153, 154, 189, 190, 191, + 128, 136, 137, 191, 173, 128, 168, 169, + 177, 178, 180, 181, 182, 183, 191, 0, + 127, 192, 255, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 191, 192, 255, 181, 189, 191, 128, 190, + 133, 181, 128, 129, 130, 140, 141, 143, + 144, 147, 148, 149, 150, 155, 156, 159, + 160, 172, 173, 177, 178, 188, 189, 191, + 177, 191, 128, 190, 128, 143, 144, 156, + 157, 191, 130, 135, 148, 164, 166, 168, + 128, 137, 138, 149, 150, 151, 152, 157, + 158, 169, 170, 185, 186, 187, 188, 191, + 142, 128, 132, 133, 137, 138, 159, 160, + 255, 137, 191, 192, 255, 175, 128, 255, + 159, 165, 170, 175, 177, 180, 191, 192, + 255, 166, 173, 128, 167, 168, 175, 176, + 255, 168, 174, 176, 191, 192, 255, 167, + 175, 183, 191, 128, 150, 151, 159, 160, + 190, 135, 143, 151, 128, 158, 159, 191, + 128, 132, 133, 135, 136, 160, 161, 169, + 170, 176, 177, 181, 182, 183, 184, 188, + 189, 191, 160, 151, 154, 187, 192, 255, + 128, 132, 133, 173, 174, 176, 177, 255, + 143, 159, 187, 191, 192, 255, 128, 175, + 176, 191, 150, 191, 192, 255, 141, 191, + 192, 255, 128, 143, 144, 189, 190, 191, + 141, 143, 160, 169, 172, 191, 192, 255, + 191, 128, 174, 175, 190, 128, 157, 158, + 159, 160, 255, 176, 191, 192, 255, 128, + 150, 151, 159, 160, 161, 162, 255, 175, + 137, 138, 184, 191, 192, 255, 128, 182, + 183, 255, 130, 134, 139, 163, 191, 192, + 255, 128, 129, 130, 179, 180, 191, 187, + 189, 128, 177, 178, 183, 184, 191, 128, + 137, 138, 165, 166, 175, 176, 255, 135, + 159, 189, 191, 192, 255, 128, 131, 132, + 178, 179, 191, 143, 165, 191, 128, 159, + 160, 175, 176, 185, 186, 190, 128, 168, + 169, 191, 131, 186, 128, 139, 140, 159, + 160, 182, 183, 189, 190, 255, 176, 178, + 180, 183, 184, 190, 191, 192, 255, 129, + 128, 130, 131, 154, 155, 157, 158, 159, + 160, 170, 171, 177, 178, 180, 181, 191, + 128, 167, 175, 129, 134, 135, 136, 137, + 142, 143, 144, 145, 150, 151, 159, 160, + 255, 155, 166, 175, 128, 162, 163, 191, + 164, 175, 135, 138, 188, 191, 192, 255, + 174, 175, 154, 191, 192, 255, 157, 169, + 183, 189, 191, 128, 134, 135, 146, 147, + 151, 152, 158, 159, 190, 130, 133, 128, + 255, 178, 191, 192, 255, 128, 146, 147, + 255, 190, 191, 192, 255, 128, 143, 144, + 255, 144, 145, 136, 175, 188, 191, 192, + 255, 181, 128, 175, 176, 255, 189, 191, + 192, 255, 128, 160, 161, 186, 187, 191, + 128, 129, 154, 155, 165, 166, 255, 191, + 192, 255, 128, 129, 130, 135, 136, 137, + 138, 143, 144, 145, 146, 151, 152, 153, + 154, 156, 157, 191, 128, 191, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 132, 151, 153, 155, 158, 175, 178, + 179, 180, 191, 140, 167, 187, 190, 128, + 255, 142, 143, 158, 191, 192, 255, 187, + 191, 192, 255, 128, 180, 181, 191, 128, + 156, 157, 159, 160, 255, 145, 191, 192, + 255, 128, 159, 160, 175, 176, 255, 139, + 143, 182, 191, 192, 255, 144, 132, 135, + 150, 191, 192, 255, 158, 175, 148, 151, + 188, 191, 192, 255, 128, 167, 168, 175, + 176, 255, 164, 191, 192, 255, 183, 191, + 192, 255, 128, 149, 150, 159, 160, 167, + 168, 191, 136, 182, 188, 128, 133, 134, + 137, 138, 184, 185, 190, 191, 255, 150, + 159, 183, 191, 192, 255, 179, 128, 159, + 160, 181, 182, 191, 
128, 149, 150, 159, + 160, 185, 186, 191, 128, 183, 184, 189, + 190, 191, 128, 148, 152, 129, 143, 144, + 179, 180, 191, 128, 159, 160, 188, 189, + 191, 128, 156, 157, 191, 136, 128, 164, + 165, 191, 128, 181, 182, 191, 128, 149, + 150, 159, 160, 178, 179, 191, 128, 145, + 146, 191, 128, 178, 179, 191, 128, 130, + 131, 132, 133, 134, 135, 136, 138, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 162, 163, 171, 176, + 177, 178, 129, 191, 128, 130, 131, 183, + 184, 191, 128, 130, 131, 175, 176, 191, + 128, 143, 144, 168, 169, 191, 128, 130, + 131, 166, 167, 191, 182, 128, 143, 144, + 178, 179, 191, 128, 130, 131, 178, 179, + 191, 128, 154, 156, 129, 132, 133, 191, + 146, 128, 171, 172, 191, 135, 137, 142, + 158, 128, 168, 169, 175, 176, 255, 159, + 191, 192, 255, 144, 128, 156, 157, 161, + 162, 191, 128, 134, 135, 138, 139, 191, + 128, 175, 176, 191, 134, 128, 131, 132, + 135, 136, 191, 128, 174, 175, 191, 128, + 151, 152, 155, 156, 191, 132, 128, 191, + 128, 170, 171, 191, 128, 153, 154, 191, + 160, 190, 192, 255, 128, 184, 185, 191, + 137, 128, 174, 175, 191, 128, 129, 177, + 178, 255, 144, 191, 192, 255, 128, 142, + 143, 144, 145, 146, 149, 129, 148, 150, + 191, 175, 191, 192, 255, 132, 191, 192, + 255, 128, 144, 129, 143, 145, 191, 144, + 153, 128, 143, 145, 152, 154, 191, 135, + 191, 192, 255, 160, 168, 169, 171, 172, + 173, 174, 188, 189, 190, 191, 128, 159, + 161, 167, 170, 187, 185, 191, 192, 255, + 128, 143, 144, 173, 174, 191, 128, 131, + 132, 162, 163, 183, 184, 188, 189, 255, + 133, 143, 145, 191, 192, 255, 128, 146, + 147, 159, 160, 191, 160, 128, 191, 128, + 129, 191, 192, 255, 159, 160, 171, 128, + 170, 172, 191, 192, 255, 173, 191, 192, + 255, 179, 191, 192, 255, 128, 176, 177, + 178, 129, 191, 128, 129, 130, 191, 171, + 175, 189, 191, 192, 255, 128, 136, 137, + 143, 144, 153, 154, 191, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 128, 143, 150, 153, 160, 191, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 191, + 139, 140, 192, 255, 133, 145, 128, 134, + 135, 137, 138, 255, 166, 167, 129, 155, + 187, 149, 181, 143, 175, 137, 169, 131, + 140, 191, 192, 255, 160, 163, 164, 165, + 184, 185, 186, 128, 159, 161, 162, 166, + 191, 133, 191, 192, 255, 132, 160, 163, + 167, 179, 184, 186, 128, 164, 165, 168, + 169, 187, 188, 191, 130, 135, 137, 139, + 144, 147, 151, 153, 155, 157, 159, 163, + 171, 179, 184, 189, 191, 128, 140, 141, + 148, 149, 160, 161, 164, 165, 166, 167, + 190, 138, 164, 170, 128, 155, 156, 160, + 161, 187, 188, 191, 128, 191, 155, 156, + 128, 191, 151, 191, 192, 255, 156, 157, + 160, 128, 191, 181, 191, 192, 255, 158, + 159, 186, 128, 185, 187, 191, 192, 255, + 162, 191, 192, 255, 160, 168, 128, 159, + 161, 167, 169, 191, 158, 191, 192, 255, + 10, 13, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 128, 191, 128, 191, + 128, 191, 128, 191, 128, 191, 10, 128, + 191, 128, 191, 128, 191, 36, 123, 37, + 123, 10, 128, 191, 128, 191, 128, 191, + 36, 123, 37, 123, 170, 181, 183, 186, + 128, 150, 152, 182, 184, 255, 192, 255, + 128, 255, 173, 130, 133, 146, 159, 165, + 171, 175, 255, 181, 190, 184, 185, 192, + 255, 140, 134, 138, 142, 161, 163, 255, + 182, 130, 136, 137, 176, 151, 152, 154, + 160, 190, 136, 144, 192, 255, 135, 129, + 130, 132, 133, 144, 170, 176, 178, 144, + 154, 160, 191, 128, 169, 174, 255, 148, + 169, 157, 158, 189, 190, 192, 255, 144, + 255, 139, 140, 178, 255, 186, 128, 181, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 
178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 128, 173, 128, 155, 160, 180, 182, 189, + 148, 161, 163, 255, 176, 164, 165, 132, + 169, 177, 141, 142, 145, 146, 179, 181, + 186, 187, 158, 133, 134, 137, 138, 143, + 150, 152, 155, 164, 165, 178, 255, 188, + 129, 131, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 190, 255, 157, 131, 134, 137, 138, 142, + 144, 146, 152, 159, 165, 182, 255, 129, + 131, 133, 141, 143, 145, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 255, 134, + 138, 142, 143, 145, 159, 164, 165, 176, + 184, 186, 255, 129, 131, 133, 140, 143, + 144, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 191, 177, 128, 132, 135, 136, + 139, 141, 150, 151, 156, 157, 159, 163, + 166, 175, 156, 130, 131, 133, 138, 142, + 144, 146, 149, 153, 154, 158, 159, 163, + 164, 168, 170, 174, 185, 190, 191, 144, + 151, 128, 130, 134, 136, 138, 141, 166, + 175, 128, 131, 133, 140, 142, 144, 146, + 168, 170, 185, 189, 255, 133, 137, 151, + 142, 148, 155, 159, 164, 165, 176, 255, + 128, 131, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 188, 191, 158, 128, + 132, 134, 136, 138, 141, 149, 150, 160, + 163, 166, 175, 177, 178, 129, 131, 133, + 140, 142, 144, 146, 186, 189, 255, 133, + 137, 143, 147, 152, 158, 164, 165, 176, + 185, 192, 255, 189, 130, 131, 133, 150, + 154, 177, 179, 187, 138, 150, 128, 134, + 143, 148, 152, 159, 166, 175, 178, 179, + 129, 186, 128, 142, 144, 153, 132, 138, + 141, 165, 167, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 185, 187, 189, 134, 128, 132, 136, 141, + 144, 153, 156, 159, 128, 181, 183, 185, + 152, 153, 160, 169, 190, 191, 128, 135, + 137, 172, 177, 191, 128, 132, 134, 151, + 153, 188, 134, 128, 129, 130, 131, 137, + 138, 139, 140, 141, 142, 143, 144, 153, + 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 173, 175, 176, 177, 178, 179, 181, + 182, 183, 188, 189, 190, 191, 132, 152, + 172, 184, 185, 187, 128, 191, 128, 137, + 144, 255, 158, 159, 134, 187, 136, 140, + 142, 143, 137, 151, 153, 142, 143, 158, + 159, 137, 177, 142, 143, 182, 183, 191, + 255, 128, 130, 133, 136, 150, 152, 255, + 145, 150, 151, 155, 156, 160, 168, 178, + 255, 128, 143, 160, 255, 182, 183, 190, + 255, 129, 255, 173, 174, 192, 255, 129, + 154, 160, 255, 171, 173, 185, 255, 128, + 140, 142, 148, 160, 180, 128, 147, 160, + 172, 174, 176, 178, 179, 148, 150, 152, + 155, 158, 159, 170, 255, 139, 141, 144, + 153, 160, 255, 184, 255, 128, 170, 176, + 255, 182, 255, 128, 158, 160, 171, 176, + 187, 134, 173, 176, 180, 128, 171, 176, + 255, 138, 143, 155, 255, 128, 155, 160, + 255, 159, 189, 190, 192, 255, 167, 128, + 137, 144, 153, 176, 189, 140, 143, 154, + 170, 180, 255, 180, 255, 128, 183, 128, + 137, 141, 189, 128, 136, 144, 146, 148, + 182, 184, 185, 128, 181, 187, 191, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 255, 190, 128, 180, + 182, 188, 130, 132, 134, 140, 144, 147, + 150, 155, 160, 172, 178, 180, 182, 188, + 128, 129, 130, 131, 132, 133, 134, 176, + 177, 178, 179, 180, 181, 182, 183, 191, + 255, 129, 147, 149, 176, 178, 190, 192, + 255, 144, 156, 161, 144, 156, 165, 176, + 130, 135, 149, 164, 166, 168, 138, 147, + 152, 157, 170, 185, 188, 191, 142, 133, + 137, 160, 255, 137, 255, 128, 174, 176, + 255, 159, 165, 170, 180, 255, 167, 173, + 128, 165, 176, 255, 168, 174, 176, 190, + 192, 255, 128, 150, 160, 166, 168, 174, + 176, 182, 184, 190, 128, 134, 136, 142, + 144, 150, 152, 158, 160, 191, 128, 129, + 130, 131, 132, 133, 134, 135, 144, 
145, + 255, 133, 135, 161, 175, 177, 181, 184, + 188, 160, 151, 152, 187, 192, 255, 133, + 173, 177, 255, 143, 159, 187, 255, 176, + 191, 182, 183, 184, 191, 192, 255, 150, + 255, 128, 146, 147, 148, 152, 153, 154, + 155, 156, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 129, 255, 141, + 255, 144, 189, 141, 143, 172, 255, 191, + 128, 175, 180, 189, 151, 159, 162, 255, + 175, 137, 138, 184, 255, 183, 255, 168, + 255, 128, 179, 188, 134, 143, 154, 159, + 184, 186, 190, 255, 128, 173, 176, 255, + 148, 159, 189, 255, 129, 142, 154, 159, + 191, 255, 128, 182, 128, 141, 144, 153, + 160, 182, 186, 255, 128, 130, 155, 157, + 160, 175, 178, 182, 129, 134, 137, 142, + 145, 150, 160, 166, 168, 174, 176, 255, + 155, 166, 175, 128, 170, 172, 173, 176, + 185, 158, 159, 160, 255, 164, 175, 135, + 138, 188, 255, 164, 169, 171, 172, 173, + 174, 175, 180, 181, 182, 183, 184, 185, + 187, 188, 189, 190, 191, 165, 186, 174, + 175, 154, 255, 190, 128, 134, 147, 151, + 157, 168, 170, 182, 184, 188, 128, 129, + 131, 132, 134, 255, 147, 255, 190, 255, + 144, 145, 136, 175, 188, 255, 128, 143, + 160, 175, 179, 180, 141, 143, 176, 180, + 182, 255, 189, 255, 191, 144, 153, 161, + 186, 129, 154, 166, 255, 191, 255, 130, + 135, 138, 143, 146, 151, 154, 156, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 161, + 169, 128, 129, 130, 131, 133, 135, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 153, 155, 178, + 179, 128, 139, 141, 166, 168, 186, 188, + 189, 191, 255, 142, 143, 158, 255, 187, + 255, 128, 180, 189, 128, 156, 160, 255, + 145, 159, 161, 255, 128, 159, 176, 255, + 139, 143, 187, 255, 128, 157, 160, 255, + 144, 132, 135, 150, 255, 158, 159, 170, + 175, 148, 151, 188, 255, 128, 167, 176, + 255, 164, 255, 183, 255, 128, 149, 160, + 167, 136, 188, 128, 133, 138, 181, 183, + 184, 191, 255, 150, 159, 183, 255, 128, + 158, 160, 178, 180, 181, 128, 149, 160, + 185, 128, 183, 190, 191, 191, 128, 131, + 133, 134, 140, 147, 149, 151, 153, 179, + 184, 186, 160, 188, 128, 156, 128, 135, + 137, 166, 128, 181, 128, 149, 160, 178, + 128, 145, 128, 178, 129, 130, 131, 132, + 133, 135, 136, 138, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 155, 156, 162, 163, 171, 176, 177, 178, + 128, 134, 135, 165, 176, 190, 144, 168, + 176, 185, 128, 180, 182, 191, 182, 144, + 179, 155, 133, 137, 141, 143, 157, 255, + 190, 128, 145, 147, 183, 136, 128, 134, + 138, 141, 143, 157, 159, 168, 176, 255, + 171, 175, 186, 255, 128, 131, 133, 140, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 191, 144, 151, 128, 132, + 135, 136, 139, 141, 157, 163, 166, 172, + 176, 180, 128, 138, 144, 153, 134, 136, + 143, 154, 255, 128, 181, 184, 255, 129, + 151, 158, 255, 129, 131, 133, 143, 154, + 255, 128, 137, 128, 153, 157, 171, 176, + 185, 160, 255, 170, 190, 192, 255, 128, + 184, 128, 136, 138, 182, 184, 191, 128, + 144, 153, 178, 255, 168, 144, 145, 183, + 255, 128, 142, 145, 149, 129, 141, 144, + 146, 147, 148, 175, 255, 132, 255, 128, + 144, 129, 143, 144, 153, 145, 152, 135, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 185, 255, + 128, 158, 160, 169, 144, 173, 176, 180, + 128, 131, 144, 153, 163, 183, 189, 255, + 144, 255, 133, 143, 191, 255, 143, 159, + 160, 128, 129, 255, 159, 160, 171, 172, + 255, 173, 255, 179, 255, 128, 176, 177, + 178, 128, 129, 171, 175, 189, 255, 128, + 136, 144, 153, 157, 
158, 133, 134, 137, + 144, 145, 146, 147, 148, 149, 154, 155, + 156, 157, 158, 159, 168, 169, 170, 150, + 153, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 139, + 140, 191, 255, 134, 128, 132, 138, 144, + 146, 255, 166, 167, 129, 155, 187, 149, + 181, 143, 175, 137, 169, 131, 140, 141, + 192, 255, 128, 182, 187, 255, 173, 180, + 182, 255, 132, 155, 159, 161, 175, 128, + 160, 163, 164, 165, 184, 185, 186, 161, + 162, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 133, 143, 151, 255, 139, + 143, 154, 255, 164, 167, 185, 187, 128, + 131, 133, 159, 161, 162, 169, 178, 180, + 183, 130, 135, 137, 139, 148, 151, 153, + 155, 157, 159, 164, 190, 141, 143, 145, + 146, 161, 162, 167, 170, 172, 178, 180, + 183, 185, 188, 128, 137, 139, 155, 161, + 163, 165, 169, 171, 187, 155, 156, 151, + 255, 156, 157, 160, 181, 255, 186, 187, + 255, 162, 255, 160, 168, 161, 167, 158, + 255, 160, 132, 135, 133, 134, 176, 255, + 128, 191, 154, 164, 168, 128, 149, 150, + 191, 128, 152, 153, 191, 181, 128, 159, + 160, 189, 190, 191, 189, 128, 131, 132, + 185, 186, 191, 144, 128, 151, 152, 161, + 162, 176, 177, 255, 169, 177, 129, 132, + 141, 142, 145, 146, 179, 181, 186, 188, + 190, 191, 192, 255, 142, 158, 128, 155, + 156, 161, 162, 175, 176, 177, 178, 191, + 169, 177, 180, 183, 128, 132, 133, 138, + 139, 142, 143, 144, 145, 146, 147, 185, + 186, 191, 157, 128, 152, 153, 158, 159, + 177, 178, 180, 181, 191, 142, 146, 169, + 177, 180, 189, 128, 132, 133, 185, 186, + 191, 144, 185, 128, 159, 160, 161, 162, + 191, 169, 177, 180, 189, 128, 132, 133, + 140, 141, 142, 143, 144, 145, 146, 147, + 185, 186, 191, 158, 177, 128, 155, 156, + 161, 162, 191, 131, 145, 155, 157, 128, + 132, 133, 138, 139, 141, 142, 149, 150, + 152, 153, 159, 160, 162, 163, 164, 165, + 167, 168, 170, 171, 173, 174, 185, 186, + 191, 144, 128, 191, 141, 145, 169, 189, + 128, 132, 133, 185, 186, 191, 128, 151, + 152, 154, 155, 159, 160, 161, 162, 191, + 128, 141, 145, 169, 180, 189, 129, 132, + 133, 185, 186, 191, 158, 128, 159, 160, + 161, 162, 176, 177, 178, 179, 191, 141, + 145, 189, 128, 132, 133, 186, 187, 191, + 142, 128, 147, 148, 150, 151, 158, 159, + 161, 162, 185, 186, 191, 178, 188, 128, + 132, 133, 150, 151, 153, 154, 189, 190, + 191, 128, 134, 135, 191, 128, 177, 129, + 179, 180, 191, 128, 131, 137, 141, 152, + 160, 164, 166, 172, 177, 189, 129, 132, + 133, 134, 135, 138, 139, 147, 148, 167, + 168, 169, 170, 179, 180, 191, 133, 128, + 134, 135, 155, 156, 159, 160, 191, 128, + 129, 191, 136, 128, 172, 173, 191, 128, + 135, 136, 140, 141, 191, 191, 128, 170, + 171, 190, 161, 128, 143, 144, 149, 150, + 153, 154, 157, 158, 164, 165, 166, 167, + 173, 174, 176, 177, 180, 181, 255, 130, + 141, 143, 159, 134, 187, 136, 140, 142, + 143, 137, 151, 153, 142, 143, 158, 159, + 137, 177, 191, 142, 143, 182, 183, 192, + 255, 129, 151, 128, 133, 134, 135, 136, + 255, 145, 150, 151, 155, 191, 192, 255, + 128, 143, 144, 159, 160, 255, 182, 183, + 190, 191, 192, 255, 128, 129, 255, 173, + 174, 192, 255, 128, 129, 154, 155, 159, + 160, 255, 171, 173, 185, 191, 192, 255, + 141, 128, 145, 146, 159, 160, 177, 178, + 191, 173, 128, 145, 146, 159, 160, 176, + 177, 191, 128, 179, 180, 191, 151, 156, + 128, 191, 128, 159, 160, 255, 184, 191, + 192, 255, 169, 128, 170, 171, 175, 176, + 255, 182, 191, 192, 255, 128, 158, 159, + 191, 128, 143, 144, 173, 174, 175, 176, + 180, 181, 191, 128, 171, 172, 175, 176, + 255, 138, 191, 192, 255, 128, 150, 151, + 159, 
160, 255, 149, 191, 192, 255, 167, + 128, 191, 128, 132, 133, 179, 180, 191, + 128, 132, 133, 139, 140, 191, 128, 130, + 131, 160, 161, 173, 174, 175, 176, 185, + 186, 255, 166, 191, 192, 255, 128, 163, + 164, 191, 128, 140, 141, 143, 144, 153, + 154, 189, 190, 191, 128, 136, 137, 191, + 173, 128, 168, 169, 177, 178, 180, 181, + 182, 183, 191, 0, 127, 192, 255, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 191, 192, 255, 181, + 189, 191, 128, 190, 133, 181, 128, 129, + 130, 140, 141, 143, 144, 147, 148, 149, + 150, 155, 156, 159, 160, 172, 173, 177, + 178, 188, 189, 191, 177, 191, 128, 190, + 128, 143, 144, 156, 157, 191, 130, 135, + 148, 164, 166, 168, 128, 137, 138, 149, + 150, 151, 152, 157, 158, 169, 170, 185, + 186, 187, 188, 191, 142, 128, 132, 133, + 137, 138, 159, 160, 255, 137, 191, 192, + 255, 175, 128, 255, 159, 165, 170, 175, + 177, 180, 191, 192, 255, 166, 173, 128, + 167, 168, 175, 176, 255, 168, 174, 176, + 191, 192, 255, 167, 175, 183, 191, 128, + 150, 151, 159, 160, 190, 135, 143, 151, + 128, 158, 159, 191, 128, 132, 133, 135, + 136, 160, 161, 169, 170, 176, 177, 181, + 182, 183, 184, 188, 189, 191, 160, 151, + 154, 187, 192, 255, 128, 132, 133, 173, + 174, 176, 177, 255, 143, 159, 187, 191, + 192, 255, 128, 175, 176, 191, 150, 191, + 192, 255, 141, 191, 192, 255, 128, 143, + 144, 189, 190, 191, 141, 143, 160, 169, + 172, 191, 192, 255, 191, 128, 174, 175, + 190, 128, 157, 158, 159, 160, 255, 176, + 191, 192, 255, 128, 150, 151, 159, 160, + 161, 162, 255, 175, 137, 138, 184, 191, + 192, 255, 128, 182, 183, 255, 130, 134, + 139, 163, 191, 192, 255, 128, 129, 130, + 179, 180, 191, 187, 189, 128, 177, 178, + 183, 184, 191, 128, 137, 138, 165, 166, + 175, 176, 255, 135, 159, 189, 191, 192, + 255, 128, 131, 132, 178, 179, 191, 143, + 165, 191, 128, 159, 160, 175, 176, 185, + 186, 190, 128, 168, 169, 191, 131, 186, + 128, 139, 140, 159, 160, 182, 183, 189, + 190, 255, 176, 178, 180, 183, 184, 190, + 191, 192, 255, 129, 128, 130, 131, 154, + 155, 157, 158, 159, 160, 170, 171, 177, + 178, 180, 181, 191, 128, 167, 175, 129, + 134, 135, 136, 137, 142, 143, 144, 145, + 150, 151, 159, 160, 255, 155, 166, 175, + 128, 162, 163, 191, 164, 175, 135, 138, + 188, 191, 192, 255, 174, 175, 154, 191, + 192, 255, 157, 169, 183, 189, 191, 128, + 134, 135, 146, 147, 151, 152, 158, 159, + 190, 130, 133, 128, 255, 178, 191, 192, + 255, 128, 146, 147, 255, 190, 191, 192, + 255, 128, 143, 144, 255, 144, 145, 136, + 175, 188, 191, 192, 255, 181, 128, 175, + 176, 255, 189, 191, 192, 255, 128, 160, + 161, 186, 187, 191, 128, 129, 154, 155, + 165, 166, 255, 191, 192, 255, 128, 129, + 130, 135, 136, 137, 138, 143, 144, 145, + 146, 151, 152, 153, 154, 156, 157, 191, + 128, 191, 128, 129, 130, 131, 133, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 132, 151, 153, + 155, 158, 175, 178, 179, 180, 191, 140, + 167, 187, 190, 128, 255, 142, 143, 158, + 191, 192, 255, 187, 191, 192, 255, 128, + 180, 181, 191, 128, 156, 157, 159, 160, + 255, 145, 191, 192, 255, 128, 159, 160, + 175, 176, 255, 139, 143, 182, 191, 192, + 255, 144, 132, 135, 150, 191, 192, 255, + 158, 175, 148, 151, 188, 191, 192, 255, + 128, 167, 168, 175, 176, 255, 164, 191, + 192, 255, 183, 191, 192, 255, 128, 149, + 150, 159, 160, 167, 168, 191, 136, 182, + 188, 128, 133, 134, 137, 138, 184, 185, + 190, 191, 255, 150, 159, 183, 191, 192, + 255, 179, 128, 159, 160, 181, 182, 191, + 128, 149, 150, 159, 160, 185, 
186, 191, + 128, 183, 184, 189, 190, 191, 128, 148, + 152, 129, 143, 144, 179, 180, 191, 128, + 159, 160, 188, 189, 191, 128, 156, 157, + 191, 136, 128, 164, 165, 191, 128, 181, + 182, 191, 128, 149, 150, 159, 160, 178, + 179, 191, 128, 145, 146, 191, 128, 178, + 179, 191, 128, 130, 131, 132, 133, 134, + 135, 136, 138, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 162, 163, 171, 176, 177, 178, 129, 191, + 128, 130, 131, 183, 184, 191, 128, 130, + 131, 175, 176, 191, 128, 143, 144, 168, + 169, 191, 128, 130, 131, 166, 167, 191, + 182, 128, 143, 144, 178, 179, 191, 128, + 130, 131, 178, 179, 191, 128, 154, 156, + 129, 132, 133, 191, 146, 128, 171, 172, + 191, 135, 137, 142, 158, 128, 168, 169, + 175, 176, 255, 159, 191, 192, 255, 144, + 128, 156, 157, 161, 162, 191, 128, 134, + 135, 138, 139, 191, 128, 175, 176, 191, + 134, 128, 131, 132, 135, 136, 191, 128, + 174, 175, 191, 128, 151, 152, 155, 156, + 191, 132, 128, 191, 128, 170, 171, 191, + 128, 153, 154, 191, 160, 190, 192, 255, + 128, 184, 185, 191, 137, 128, 174, 175, + 191, 128, 129, 177, 178, 255, 144, 191, + 192, 255, 128, 142, 143, 144, 145, 146, + 149, 129, 148, 150, 191, 175, 191, 192, + 255, 132, 191, 192, 255, 128, 144, 129, + 143, 145, 191, 144, 153, 128, 143, 145, + 152, 154, 191, 135, 191, 192, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 128, 159, 161, 167, 170, 187, + 185, 191, 192, 255, 128, 143, 144, 173, + 174, 191, 128, 131, 132, 162, 163, 183, + 184, 188, 189, 255, 133, 143, 145, 191, + 192, 255, 128, 146, 147, 159, 160, 191, + 160, 128, 191, 128, 129, 191, 192, 255, + 159, 160, 171, 128, 170, 172, 191, 192, + 255, 173, 191, 192, 255, 179, 191, 192, + 255, 128, 176, 177, 178, 129, 191, 128, + 129, 130, 191, 171, 175, 189, 191, 192, + 255, 128, 136, 137, 143, 144, 153, 154, + 191, 144, 145, 146, 147, 148, 149, 154, + 155, 156, 157, 158, 159, 128, 143, 150, + 153, 160, 191, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 191, 139, 140, 192, 255, + 133, 145, 128, 134, 135, 137, 138, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 191, 192, 255, + 160, 163, 164, 165, 184, 185, 186, 128, + 159, 161, 162, 166, 191, 133, 191, 192, + 255, 132, 160, 163, 167, 179, 184, 186, + 128, 164, 165, 168, 169, 187, 188, 191, + 130, 135, 137, 139, 144, 147, 151, 153, + 155, 157, 159, 163, 171, 179, 184, 189, + 191, 128, 140, 141, 148, 149, 160, 161, + 164, 165, 166, 167, 190, 138, 164, 170, + 128, 155, 156, 160, 161, 187, 188, 191, + 128, 191, 155, 156, 128, 191, 151, 191, + 192, 255, 156, 157, 160, 128, 191, 181, + 191, 192, 255, 158, 159, 186, 128, 185, + 187, 191, 192, 255, 162, 191, 192, 255, + 160, 168, 128, 159, 161, 167, 169, 191, + 158, 191, 192, 255, 9, 10, 13, 32, + 33, 34, 35, 38, 46, 47, 60, 61, + 62, 64, 92, 95, 123, 124, 125, 126, + 127, 194, 195, 198, 199, 203, 204, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 238, 239, + 240, 0, 36, 37, 45, 48, 57, 58, + 63, 65, 90, 91, 96, 97, 122, 192, + 193, 196, 218, 229, 236, 241, 247, 9, + 32, 10, 61, 10, 38, 46, 42, 47, + 46, 69, 101, 48, 57, 60, 61, 61, + 62, 61, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 218, 229, 236, 124, 125, + 128, 191, 170, 181, 186, 128, 191, 151, + 183, 128, 255, 192, 255, 0, 127, 173, + 130, 133, 146, 159, 165, 171, 
175, 191, + 192, 255, 181, 190, 128, 175, 176, 183, + 184, 185, 186, 191, 134, 139, 141, 162, + 128, 135, 136, 255, 182, 130, 137, 176, + 151, 152, 154, 160, 136, 191, 192, 255, + 128, 143, 144, 170, 171, 175, 176, 178, + 179, 191, 128, 159, 160, 191, 176, 128, + 138, 139, 173, 174, 255, 148, 150, 164, + 167, 173, 176, 185, 189, 190, 192, 255, + 144, 128, 145, 146, 175, 176, 191, 128, + 140, 141, 255, 166, 176, 178, 191, 192, + 255, 186, 128, 137, 138, 170, 171, 179, + 180, 181, 182, 191, 160, 161, 162, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 128, 191, 128, 129, 130, 131, + 137, 138, 139, 140, 141, 142, 143, 144, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 182, 183, 184, 188, + 189, 190, 191, 132, 187, 129, 130, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 128, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 136, 143, 145, + 191, 192, 255, 182, 183, 184, 128, 191, + 128, 191, 191, 128, 190, 192, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 191, 192, 255, 158, + 159, 128, 157, 160, 191, 192, 255, 128, + 191, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 128, 163, 165, 186, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 128, + 159, 161, 169, 173, 191, 128, 191, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 92, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 123, 123, 126, 126, 37, 123, + 126, 10, 13, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 128, 191, 128, + 191, 128, 191, 10, 13, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 10, 13, 123, 10, 13, 126, 10, + 13, 126, 126, 128, 191, 128, 191, 128, + 191, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 37, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 10, + 13, 123, 10, 13, 126, 10, 13, 126, + 126, 128, 191, 128, 191, 128, 191, 95, + 194, 195, 198, 199, 203, 204, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 238, 239, 240, + 65, 90, 97, 122, 128, 191, 192, 193, + 196, 218, 229, 236, 241, 247, 248, 255, + 45, 95, 194, 195, 198, 199, 203, 204, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 243, 48, 57, 65, 90, 97, 122, + 196, 218, 229, 236, 128, 191, 170, 181, + 186, 128, 191, 151, 183, 128, 255, 192, + 255, 0, 127, 173, 130, 133, 146, 159, + 165, 171, 175, 191, 192, 255, 181, 190, + 128, 175, 176, 183, 184, 185, 186, 191, + 134, 139, 141, 162, 128, 135, 136, 255, + 182, 130, 137, 176, 151, 152, 154, 160, + 136, 191, 192, 255, 128, 143, 144, 170, + 171, 175, 176, 178, 179, 191, 128, 159, + 160, 191, 176, 128, 138, 139, 173, 174, + 255, 148, 150, 164, 167, 173, 176, 185, + 189, 190, 192, 255, 144, 128, 145, 146, + 175, 176, 191, 128, 140, 141, 255, 166, + 176, 178, 191, 192, 255, 186, 128, 137, + 138, 170, 171, 179, 180, 181, 182, 191, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 
184, + 185, 186, 187, 188, 189, 190, 128, 191, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 182, 183, 184, 188, 189, 190, 191, 132, + 187, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 128, 191, + 128, 129, 130, 131, 132, 133, 134, 135, + 144, 136, 143, 145, 191, 192, 255, 182, + 183, 184, 128, 191, 128, 191, 191, 128, + 190, 192, 255, 128, 146, 147, 148, 152, + 153, 154, 155, 156, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 129, + 191, 192, 255, 158, 159, 128, 157, 160, + 191, 192, 255, 128, 191, 164, 169, 171, + 172, 173, 174, 175, 180, 181, 182, 183, + 184, 185, 187, 188, 189, 190, 191, 128, + 163, 165, 186, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 128, 159, 161, 169, 173, + 191, 128, 191, +} + +var _hcltok_single_lengths []byte = []byte{ + 0, 1, 1, 2, 3, 2, 0, 32, + 31, 36, 1, 4, 0, 0, 0, 0, + 1, 2, 1, 1, 1, 1, 0, 1, + 1, 0, 0, 2, 0, 0, 0, 1, + 32, 0, 0, 0, 0, 1, 3, 1, + 1, 1, 0, 2, 0, 1, 1, 2, + 0, 3, 0, 1, 0, 2, 1, 2, + 0, 0, 5, 1, 4, 0, 0, 1, + 43, 0, 0, 0, 2, 3, 2, 1, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 1, + 0, 15, 0, 0, 0, 1, 6, 1, + 0, 0, 1, 0, 2, 0, 0, 0, + 9, 0, 1, 1, 0, 0, 0, 3, + 0, 1, 0, 28, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 2, + 0, 0, 18, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 16, 36, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 2, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 28, 0, 0, 0, 1, + 1, 1, 1, 0, 0, 2, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 4, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 3, 0, 0, 4, 0, + 0, 0, 18, 0, 0, 0, 1, 4, + 1, 4, 1, 0, 3, 2, 2, 2, + 1, 0, 0, 1, 8, 0, 0, 0, + 4, 12, 0, 2, 0, 3, 0, 1, + 0, 2, 0, 1, 2, 0, 3, 1, + 2, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 1, 28, 3, 0, 1, 1, + 2, 1, 0, 1, 1, 2, 1, 1, + 2, 1, 1, 0, 2, 1, 1, 1, + 1, 0, 0, 6, 1, 1, 0, 0, + 46, 1, 1, 0, 0, 0, 0, 2, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 13, 2, 0, 0, + 0, 9, 0, 1, 28, 0, 1, 3, + 0, 2, 0, 0, 0, 1, 0, 1, + 1, 2, 0, 18, 2, 0, 0, 16, + 35, 0, 0, 0, 1, 0, 28, 0, + 0, 0, 0, 1, 0, 2, 0, 0, + 1, 0, 0, 1, 0, 0, 1, 0, + 0, 0, 0, 1, 11, 0, 0, 0, + 0, 4, 0, 12, 1, 7, 0, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 
3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 1, 1, 3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 2, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 2, 2, 1, 0, 0, 0, + 2, 2, 4, 0, 0, 0, 0, 1, + 2, 1, 1, 1, 1, 0, 1, 1, + 0, 0, 2, 0, 0, 0, 1, 32, + 0, 0, 0, 0, 1, 3, 1, 1, + 1, 0, 2, 0, 1, 1, 2, 0, + 3, 0, 1, 0, 2, 1, 2, 0, + 0, 5, 1, 4, 0, 0, 1, 43, + 0, 0, 0, 2, 3, 2, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 1, 0, + 15, 0, 0, 0, 1, 6, 1, 0, + 0, 1, 0, 2, 0, 0, 0, 9, + 0, 1, 1, 0, 0, 0, 3, 0, + 1, 0, 28, 0, 0, 0, 1, 0, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 18, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 16, 36, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 2, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 28, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 2, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 4, 0, 0, 2, 2, 0, + 11, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 3, 0, 0, 4, 0, 0, + 0, 18, 0, 0, 0, 1, 4, 1, + 4, 1, 0, 3, 2, 2, 2, 1, + 0, 0, 1, 8, 0, 0, 0, 4, + 12, 0, 2, 0, 3, 0, 1, 0, + 2, 0, 1, 2, 0, 0, 3, 0, + 1, 1, 1, 2, 2, 4, 1, 6, + 2, 4, 2, 4, 1, 4, 0, 6, + 1, 3, 1, 2, 0, 2, 11, 1, + 1, 1, 0, 1, 1, 0, 2, 0, + 3, 3, 2, 1, 0, 0, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 4, 3, 2, + 2, 0, 6, 1, 0, 1, 1, 0, + 2, 0, 4, 3, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 1, 0, 3, 0, 2, 0, + 0, 0, 3, 0, 2, 1, 1, 3, + 1, 0, 0, 0, 0, 0, 5, 2, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 1, 1, 0, 0, 35, 4, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 3, 0, 1, + 0, 0, 3, 0, 0, 1, 0, 0, + 0, 0, 28, 0, 0, 0, 0, 1, + 0, 3, 1, 4, 0, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 0, + 1, 1, 0, 7, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 1, + 1, 3, 0, 0, 4, 0, 0, 0, + 12, 1, 4, 1, 5, 2, 0, 3, + 2, 2, 2, 1, 7, 0, 7, 17, + 3, 0, 2, 0, 3, 0, 0, 1, + 0, 2, 0, 53, 2, 1, 1, 1, + 1, 1, 2, 3, 2, 2, 1, 34, + 1, 1, 0, 3, 2, 0, 0, 0, + 1, 2, 4, 1, 0, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 30, 47, 13, 9, 3, 0, 1, 28, + 2, 0, 18, 16, 0, 6, 4, 2, + 2, 0, 1, 1, 1, 2, 1, 2, + 0, 0, 0, 4, 2, 2, 3, 3, + 2, 1, 1, 0, 0, 0, 4, 2, + 2, 3, 3, 2, 1, 1, 0, 0, + 0, 33, 34, 0, 3, 2, 0, 0, + 0, 1, 2, 4, 1, 0, 1, 0, + 0, 0, 0, 1, 1, 1, 0, 0, + 1, 30, 47, 13, 9, 3, 0, 1, + 28, 2, 0, 18, 16, 0, +} + +var _hcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 1, 1, 1, 5, + 5, 5, 0, 0, 3, 0, 1, 1, + 4, 2, 3, 0, 1, 0, 2, 2, + 4, 2, 2, 3, 1, 1, 1, 1, + 0, 1, 1, 2, 2, 1, 4, 6, + 9, 6, 8, 5, 8, 7, 10, 4, + 6, 4, 7, 7, 5, 5, 4, 5, + 1, 2, 8, 4, 3, 3, 3, 0, + 3, 1, 2, 1, 2, 2, 3, 3, + 1, 3, 2, 2, 1, 2, 2, 2, + 3, 4, 4, 3, 1, 2, 1, 3, + 2, 2, 2, 2, 2, 3, 3, 1, + 1, 2, 1, 3, 2, 2, 3, 2, + 7, 0, 1, 4, 1, 2, 4, 2, + 1, 2, 0, 2, 2, 3, 5, 5, + 1, 4, 1, 1, 2, 2, 1, 0, + 0, 1, 1, 1, 1, 1, 2, 2, + 2, 2, 1, 1, 1, 4, 2, 2, + 3, 1, 4, 4, 6, 1, 3, 1, + 1, 2, 1, 1, 1, 5, 3, 1, + 1, 1, 2, 3, 3, 1, 2, 2, + 1, 4, 1, 2, 5, 2, 1, 1, + 0, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 2, 4, 2, 1, + 2, 2, 2, 6, 1, 1, 2, 1, + 2, 1, 1, 1, 2, 2, 2, 1, + 3, 2, 5, 2, 8, 6, 2, 2, + 2, 2, 3, 1, 3, 1, 2, 1, + 3, 2, 2, 3, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 4, 1, 2, + 1, 0, 1, 1, 1, 1, 0, 1, 
+ 2, 3, 1, 3, 3, 1, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 2, 2, 2, 1, 5, 2, 2, + 5, 7, 5, 0, 1, 0, 1, 1, + 1, 1, 1, 0, 1, 1, 0, 3, + 3, 1, 1, 2, 1, 3, 5, 1, + 1, 2, 2, 1, 1, 1, 1, 2, + 6, 3, 7, 2, 6, 1, 6, 2, + 8, 0, 4, 2, 5, 2, 3, 3, + 3, 1, 2, 8, 2, 0, 2, 1, + 2, 1, 5, 2, 1, 3, 3, 0, + 2, 1, 2, 1, 0, 1, 1, 3, + 1, 1, 2, 3, 0, 0, 3, 2, + 4, 1, 4, 1, 1, 3, 1, 1, + 1, 1, 2, 2, 1, 3, 1, 4, + 3, 3, 1, 1, 5, 2, 1, 1, + 2, 1, 2, 1, 3, 2, 0, 1, + 1, 1, 1, 1, 1, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 0, 1, + 1, 2, 2, 1, 1, 1, 3, 2, + 1, 0, 2, 1, 1, 1, 1, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 5, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 3, 0, 1, 1, 4, + 2, 3, 0, 1, 0, 2, 2, 4, + 2, 2, 3, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 4, 6, 9, + 6, 8, 5, 8, 7, 10, 4, 6, + 4, 7, 7, 5, 5, 4, 5, 1, + 2, 8, 4, 3, 3, 3, 0, 3, + 1, 2, 1, 2, 2, 3, 3, 1, + 3, 2, 2, 1, 2, 2, 2, 3, + 4, 4, 3, 1, 2, 1, 3, 2, + 2, 2, 2, 2, 3, 3, 1, 1, + 2, 1, 3, 2, 2, 3, 2, 7, + 0, 1, 4, 1, 2, 4, 2, 1, + 2, 0, 2, 2, 3, 5, 5, 1, + 4, 1, 1, 2, 2, 1, 0, 0, + 1, 1, 1, 1, 1, 2, 2, 2, + 2, 1, 1, 1, 4, 2, 2, 3, + 1, 4, 4, 6, 1, 3, 1, 1, + 2, 1, 1, 1, 5, 3, 1, 1, + 1, 2, 3, 3, 1, 2, 2, 1, + 4, 1, 2, 5, 2, 1, 1, 0, + 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 1, 2, 4, 2, 1, 2, + 2, 2, 6, 1, 1, 2, 1, 2, + 1, 1, 1, 2, 2, 2, 1, 3, + 2, 5, 2, 8, 6, 2, 2, 2, + 2, 3, 1, 3, 1, 2, 1, 3, + 2, 2, 3, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 4, 1, 2, 1, + 0, 1, 1, 1, 1, 0, 1, 2, + 3, 1, 3, 3, 1, 0, 3, 0, + 2, 3, 1, 0, 0, 0, 0, 2, + 2, 2, 2, 1, 5, 2, 2, 5, + 7, 5, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 1, 1, 1, 2, 2, + 3, 3, 4, 7, 5, 7, 5, 3, + 3, 7, 3, 13, 1, 3, 5, 3, + 5, 3, 6, 5, 2, 2, 8, 4, + 1, 2, 3, 2, 10, 2, 2, 0, + 2, 3, 3, 1, 2, 3, 3, 1, + 2, 3, 3, 4, 4, 2, 1, 2, + 2, 3, 2, 2, 5, 3, 2, 3, + 2, 1, 3, 3, 6, 2, 2, 5, + 2, 5, 1, 1, 2, 4, 1, 11, + 1, 3, 8, 4, 2, 1, 0, 4, + 3, 3, 3, 2, 9, 1, 1, 4, + 3, 2, 2, 2, 3, 4, 2, 3, + 2, 4, 3, 2, 2, 3, 3, 4, + 3, 3, 4, 2, 5, 4, 8, 7, + 1, 2, 1, 
3, 1, 2, 5, 1, + 2, 2, 2, 2, 1, 3, 2, 2, + 3, 3, 1, 9, 1, 5, 1, 3, + 2, 2, 3, 2, 3, 3, 3, 1, + 3, 3, 2, 2, 4, 5, 3, 3, + 4, 3, 3, 3, 2, 2, 2, 4, + 2, 2, 1, 3, 3, 3, 3, 3, + 3, 2, 2, 3, 2, 3, 3, 2, + 3, 2, 3, 1, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 3, + 2, 3, 2, 3, 5, 3, 3, 1, + 2, 3, 2, 2, 1, 2, 3, 4, + 3, 0, 3, 0, 2, 3, 1, 0, + 0, 0, 0, 2, 3, 2, 4, 6, + 4, 1, 1, 2, 1, 2, 1, 3, + 2, 3, 2, 11, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 5, + 0, 0, 1, 1, 1, 0, 1, 1, + 5, 4, 2, 0, 1, 0, 2, 2, + 5, 2, 3, 5, 3, 2, 3, 5, + 1, 1, 1, 3, 1, 1, 2, 2, + 3, 1, 2, 3, 1, 5, 6, 0, + 0, 0, 0, 0, 0, 0, 0, 5, + 1, 1, 1, 5, 6, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 0, 0, 0, 0, 1, 1, + 1, 8, 5, 1, 1, 1, 0, 1, + 1, 5, 4, 2, 0, 1, 0, 2, + 2, 5, 2, 3, 5, 3, 2, 3, + 5, 1, 1, 1, 3, 1, 1, 2, + 2, 3, 1, 2, 3, 1, +} + +var _hcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 7, 12, 16, 18, + 56, 93, 135, 137, 142, 146, 147, 149, + 151, 157, 162, 167, 169, 172, 174, 177, + 181, 187, 190, 193, 199, 201, 203, 205, + 208, 241, 243, 245, 248, 251, 254, 262, + 270, 281, 289, 298, 306, 315, 324, 336, + 343, 350, 358, 366, 375, 381, 389, 395, + 403, 405, 408, 422, 428, 436, 440, 444, + 446, 493, 495, 498, 500, 505, 511, 517, + 522, 525, 529, 532, 535, 537, 540, 543, + 546, 550, 555, 560, 564, 566, 569, 571, + 575, 578, 581, 584, 587, 591, 596, 600, + 602, 604, 607, 609, 613, 616, 619, 627, + 631, 639, 655, 657, 662, 664, 668, 679, + 683, 685, 688, 690, 693, 698, 702, 708, + 714, 725, 730, 733, 736, 739, 742, 744, + 748, 749, 752, 754, 784, 786, 788, 791, + 795, 798, 802, 804, 806, 808, 814, 817, + 820, 824, 826, 831, 836, 843, 846, 850, + 854, 856, 859, 879, 881, 883, 890, 894, + 896, 898, 900, 903, 907, 911, 913, 917, + 920, 922, 927, 945, 984, 990, 993, 995, + 997, 999, 1002, 1005, 1008, 1011, 1014, 1018, + 1021, 1024, 1027, 1029, 1031, 1034, 1041, 1044, + 1046, 1049, 1052, 1055, 1063, 1065, 1067, 1070, + 1072, 1075, 1077, 1079, 1109, 1112, 1115, 1118, + 1121, 1126, 1130, 1137, 1140, 1149, 1158, 1161, + 1165, 1168, 1171, 1175, 1177, 1181, 1183, 1186, + 1188, 1192, 1196, 1200, 1208, 1210, 1212, 1216, + 1220, 1222, 1235, 1237, 1240, 1243, 1248, 1250, + 1253, 1255, 1257, 1260, 1265, 1267, 1269, 1274, + 1276, 1279, 1283, 1303, 1307, 1311, 1313, 1315, + 1323, 1325, 1332, 1337, 1339, 1343, 1346, 1349, + 1352, 1356, 1359, 1362, 1366, 1376, 1382, 1385, + 1388, 1398, 1418, 1424, 1427, 1429, 1433, 1435, + 1438, 1440, 1444, 1446, 1448, 1452, 1454, 1458, + 1463, 1469, 1471, 1473, 1476, 1478, 1482, 1489, + 1492, 1494, 1497, 1501, 1531, 1536, 1538, 1541, + 1545, 1554, 1559, 1567, 1571, 1579, 1583, 1591, + 1595, 1606, 1608, 1614, 1617, 1625, 1629, 1634, + 1639, 1644, 1646, 1649, 1664, 1668, 1670, 1673, + 1675, 1724, 1727, 1734, 1737, 1739, 1743, 1747, + 1750, 1754, 1756, 1759, 1761, 1763, 1765, 1767, + 1771, 1773, 1775, 1778, 1782, 1796, 1799, 1803, + 1806, 1811, 1822, 1827, 1830, 1860, 1864, 1867, + 1872, 1874, 1878, 1881, 1884, 1886, 1891, 1893, + 1899, 1904, 1910, 1912, 1932, 1940, 1943, 1945, + 1963, 2001, 2003, 2006, 2008, 2013, 2016, 2045, + 2047, 2049, 2051, 2053, 2056, 2058, 2062, 2065, + 2067, 2070, 2072, 2074, 2077, 2079, 2081, 2083, + 2085, 2087, 2090, 2093, 2096, 2109, 2111, 2115, + 2118, 2120, 2125, 2128, 2142, 2145, 2154, 2156, + 2161, 2165, 2166, 2168, 2170, 2176, 2181, 2186, + 2188, 2191, 2193, 2196, 2200, 2206, 2209, 2212, + 2218, 2220, 2222, 2224, 2227, 2260, 2262, 2264, + 2267, 2270, 2273, 2281, 2289, 2300, 2308, 2317, + 2325, 2334, 2343, 2355, 2362, 2369, 2377, 2385, + 2394, 2400, 2408, 2414, 2422, 2424, 2427, 2441, + 
2447, 2455, 2459, 2463, 2465, 2512, 2514, 2517, + 2519, 2524, 2530, 2536, 2541, 2544, 2548, 2551, + 2554, 2556, 2559, 2562, 2565, 2569, 2574, 2579, + 2583, 2585, 2588, 2590, 2594, 2597, 2600, 2603, + 2606, 2610, 2615, 2619, 2621, 2623, 2626, 2628, + 2632, 2635, 2638, 2646, 2650, 2658, 2674, 2676, + 2681, 2683, 2687, 2698, 2702, 2704, 2707, 2709, + 2712, 2717, 2721, 2727, 2733, 2744, 2749, 2752, + 2755, 2758, 2761, 2763, 2767, 2768, 2771, 2773, + 2803, 2805, 2807, 2810, 2814, 2817, 2821, 2823, + 2825, 2827, 2833, 2836, 2839, 2843, 2845, 2850, + 2855, 2862, 2865, 2869, 2873, 2875, 2878, 2898, + 2900, 2902, 2909, 2913, 2915, 2917, 2919, 2922, + 2926, 2930, 2932, 2936, 2939, 2941, 2946, 2964, + 3003, 3009, 3012, 3014, 3016, 3018, 3021, 3024, + 3027, 3030, 3033, 3037, 3040, 3043, 3046, 3048, + 3050, 3053, 3060, 3063, 3065, 3068, 3071, 3074, + 3082, 3084, 3086, 3089, 3091, 3094, 3096, 3098, + 3128, 3131, 3134, 3137, 3140, 3145, 3149, 3156, + 3159, 3168, 3177, 3180, 3184, 3187, 3190, 3194, + 3196, 3200, 3202, 3205, 3207, 3211, 3215, 3219, + 3227, 3229, 3231, 3235, 3239, 3241, 3254, 3256, + 3259, 3262, 3267, 3269, 3272, 3274, 3276, 3279, + 3284, 3286, 3288, 3293, 3295, 3298, 3302, 3322, + 3326, 3330, 3332, 3334, 3342, 3344, 3351, 3356, + 3358, 3362, 3365, 3368, 3371, 3375, 3378, 3381, + 3385, 3395, 3401, 3404, 3407, 3417, 3437, 3443, + 3446, 3448, 3452, 3454, 3457, 3459, 3463, 3465, + 3467, 3471, 3473, 3475, 3481, 3484, 3489, 3494, + 3500, 3510, 3518, 3530, 3537, 3547, 3553, 3565, + 3571, 3589, 3592, 3600, 3606, 3616, 3623, 3630, + 3638, 3646, 3649, 3654, 3674, 3680, 3683, 3687, + 3691, 3695, 3707, 3710, 3715, 3716, 3722, 3729, + 3735, 3738, 3741, 3745, 3749, 3752, 3755, 3760, + 3764, 3770, 3776, 3779, 3783, 3786, 3789, 3794, + 3797, 3800, 3806, 3810, 3813, 3817, 3820, 3823, + 3827, 3831, 3838, 3841, 3844, 3850, 3853, 3860, + 3862, 3864, 3867, 3876, 3881, 3895, 3899, 3903, + 3918, 3924, 3927, 3930, 3932, 3937, 3943, 3947, + 3955, 3961, 3971, 3974, 3977, 3982, 3986, 3989, + 3992, 3995, 3999, 4004, 4008, 4012, 4015, 4020, + 4025, 4028, 4034, 4038, 4044, 4049, 4053, 4057, + 4065, 4068, 4076, 4082, 4092, 4103, 4106, 4109, + 4111, 4115, 4117, 4120, 4131, 4135, 4138, 4141, + 4144, 4147, 4149, 4153, 4157, 4160, 4164, 4169, + 4172, 4182, 4184, 4225, 4231, 4235, 4238, 4241, + 4245, 4248, 4252, 4256, 4261, 4263, 4267, 4271, + 4274, 4277, 4282, 4291, 4295, 4300, 4305, 4309, + 4316, 4320, 4323, 4327, 4330, 4335, 4338, 4341, + 4371, 4375, 4379, 4383, 4387, 4392, 4396, 4402, + 4406, 4414, 4417, 4422, 4426, 4429, 4434, 4437, + 4441, 4444, 4447, 4450, 4453, 4456, 4460, 4464, + 4467, 4477, 4480, 4483, 4488, 4494, 4497, 4512, + 4515, 4519, 4525, 4529, 4533, 4536, 4540, 4547, + 4550, 4553, 4559, 4562, 4566, 4571, 4587, 4589, + 4597, 4599, 4607, 4613, 4615, 4619, 4622, 4625, + 4628, 4632, 4643, 4646, 4658, 4682, 4690, 4692, + 4696, 4699, 4704, 4707, 4709, 4714, 4717, 4723, + 4726, 4734, 4736, 4738, 4740, 4742, 4744, 4746, + 4748, 4750, 4752, 4755, 4758, 4760, 4762, 4764, + 4766, 4769, 4772, 4777, 4781, 4782, 4784, 4786, + 4792, 4797, 4802, 4804, 4807, 4809, 4812, 4816, + 4822, 4825, 4828, 4834, 4836, 4838, 4840, 4843, + 4876, 4878, 4880, 4883, 4886, 4889, 4897, 4905, + 4916, 4924, 4933, 4941, 4950, 4959, 4971, 4978, + 4985, 4993, 5001, 5010, 5016, 5024, 5030, 5038, + 5040, 5043, 5057, 5063, 5071, 5075, 5079, 5081, + 5128, 5130, 5133, 5135, 5140, 5146, 5152, 5157, + 5160, 5164, 5167, 5170, 5172, 5175, 5178, 5181, + 5185, 5190, 5195, 5199, 5201, 5204, 5206, 5210, + 5213, 5216, 5219, 5222, 5226, 5231, 5235, 5237, + 
5239, 5242, 5244, 5248, 5251, 5254, 5262, 5266, + 5274, 5290, 5292, 5297, 5299, 5303, 5314, 5318, + 5320, 5323, 5325, 5328, 5333, 5337, 5343, 5349, + 5360, 5365, 5368, 5371, 5374, 5377, 5379, 5383, + 5384, 5387, 5389, 5419, 5421, 5423, 5426, 5430, + 5433, 5437, 5439, 5441, 5443, 5449, 5452, 5455, + 5459, 5461, 5466, 5471, 5478, 5481, 5485, 5489, + 5491, 5494, 5514, 5516, 5518, 5525, 5529, 5531, + 5533, 5535, 5538, 5542, 5546, 5548, 5552, 5555, + 5557, 5562, 5580, 5619, 5625, 5628, 5630, 5632, + 5634, 5637, 5640, 5643, 5646, 5649, 5653, 5656, + 5659, 5662, 5664, 5666, 5669, 5676, 5679, 5681, + 5684, 5687, 5690, 5698, 5700, 5702, 5705, 5707, + 5710, 5712, 5714, 5744, 5747, 5750, 5753, 5756, + 5761, 5765, 5772, 5775, 5784, 5793, 5796, 5800, + 5803, 5806, 5810, 5812, 5816, 5818, 5821, 5823, + 5827, 5831, 5835, 5843, 5845, 5847, 5851, 5855, + 5857, 5870, 5872, 5875, 5878, 5883, 5885, 5888, + 5890, 5892, 5895, 5900, 5902, 5904, 5909, 5911, + 5914, 5918, 5938, 5942, 5946, 5948, 5950, 5958, + 5960, 5967, 5972, 5974, 5978, 5981, 5984, 5987, + 5991, 5994, 5997, 6001, 6011, 6017, 6020, 6023, + 6033, 6053, 6059, 6062, 6064, 6068, 6070, 6073, + 6075, 6079, 6081, 6083, 6087, 6089, 6091, 6097, + 6100, 6105, 6110, 6116, 6126, 6134, 6146, 6153, + 6163, 6169, 6181, 6187, 6205, 6208, 6216, 6222, + 6232, 6239, 6246, 6254, 6262, 6265, 6270, 6290, + 6296, 6299, 6303, 6307, 6311, 6323, 6326, 6331, + 6332, 6338, 6345, 6351, 6354, 6357, 6361, 6365, + 6368, 6371, 6376, 6380, 6386, 6392, 6395, 6399, + 6402, 6405, 6410, 6413, 6416, 6422, 6426, 6429, + 6433, 6436, 6439, 6443, 6447, 6454, 6457, 6460, + 6466, 6469, 6476, 6478, 6480, 6483, 6492, 6497, + 6511, 6515, 6519, 6534, 6540, 6543, 6546, 6548, + 6553, 6559, 6563, 6571, 6577, 6587, 6590, 6593, + 6598, 6602, 6605, 6608, 6611, 6615, 6620, 6624, + 6628, 6631, 6636, 6641, 6644, 6650, 6654, 6660, + 6665, 6669, 6673, 6681, 6684, 6692, 6698, 6708, + 6719, 6722, 6725, 6727, 6731, 6733, 6736, 6747, + 6751, 6754, 6757, 6760, 6763, 6765, 6769, 6773, + 6776, 6780, 6785, 6788, 6798, 6800, 6841, 6847, + 6851, 6854, 6857, 6861, 6864, 6868, 6872, 6877, + 6879, 6883, 6887, 6890, 6893, 6898, 6907, 6911, + 6916, 6921, 6925, 6932, 6936, 6939, 6943, 6946, + 6951, 6954, 6957, 6987, 6991, 6995, 6999, 7003, + 7008, 7012, 7018, 7022, 7030, 7033, 7038, 7042, + 7045, 7050, 7053, 7057, 7060, 7063, 7066, 7069, + 7072, 7076, 7080, 7083, 7093, 7096, 7099, 7104, + 7110, 7113, 7128, 7131, 7135, 7141, 7145, 7149, + 7152, 7156, 7163, 7166, 7169, 7175, 7178, 7182, + 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231, + 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274, + 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325, + 7330, 7333, 7339, 7342, 7407, 7410, 7412, 7414, + 7416, 7418, 7420, 7423, 7428, 7431, 7434, 7436, + 7476, 7478, 7480, 7482, 7487, 7491, 7492, 7494, + 7496, 7503, 7510, 7517, 7519, 7521, 7523, 7526, + 7529, 7535, 7538, 7543, 7550, 7555, 7558, 7562, + 7569, 7601, 7650, 7665, 7678, 7683, 7685, 7689, + 7720, 7726, 7728, 7749, 7769, 7771, 7783, 7794, + 7797, 7800, 7801, 7803, 7805, 7807, 7810, 7812, + 7820, 7822, 7824, 7826, 7836, 7845, 7848, 7852, + 7856, 7859, 7861, 7863, 7865, 7867, 7869, 7879, + 7888, 7891, 7895, 7899, 7902, 7904, 7906, 7908, + 7910, 7912, 7954, 7994, 7996, 8001, 8005, 8006, + 8008, 8010, 8017, 8024, 8031, 8033, 8035, 8037, + 8040, 8043, 8049, 8052, 8057, 8064, 8069, 8072, + 8076, 8083, 8115, 8164, 8179, 8192, 8197, 8199, + 8203, 8234, 8240, 8242, 8263, 8283, +} + +var _hcltok_indicies []int16 = []int16{ + 1, 0, 3, 2, 3, 4, 2, 6, + 8, 8, 7, 5, 9, 9, 7, 5, + 7, 5, 10, 11, 12, 
13, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 39, 40, 41, + 42, 43, 11, 11, 14, 14, 38, 0, + 11, 12, 13, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 39, 40, 41, 42, 43, 11, + 11, 14, 14, 38, 0, 44, 45, 11, + 11, 46, 13, 15, 16, 17, 16, 47, + 48, 20, 49, 22, 23, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 37, 39, 63, 41, 64, 65, + 66, 11, 11, 11, 14, 38, 0, 44, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 0, 11, 0, 11, 11, 0, 0, + 0, 0, 0, 0, 11, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 0, + 11, 0, 0, 11, 0, 11, 0, 0, + 11, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 11, 11, 11, 0, + 67, 68, 69, 70, 14, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 0, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 16, + 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 14, 15, 133, 134, 135, 136, + 137, 14, 16, 14, 0, 11, 0, 11, + 11, 0, 0, 11, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 0, + 11, 11, 11, 0, 0, 0, 11, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 0, 0, 11, 11, 11, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 0, 11, 11, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 11, 11, 11, 0, 0, 0, 0, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 0, 11, 11, 11, 0, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 14, 147, 148, 149, 150, 151, 0, 11, + 0, 0, 0, 0, 0, 11, 11, 0, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 0, 152, 109, 153, 154, 155, 14, + 156, 157, 16, 14, 0, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 119, 0, 16, 14, 14, 158, 0, 14, + 0, 11, 16, 159, 160, 16, 161, 162, + 16, 57, 163, 164, 165, 166, 167, 16, + 168, 169, 170, 16, 171, 172, 173, 15, + 174, 175, 176, 15, 177, 16, 14, 0, + 0, 11, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 0, 0, 0, + 0, 11, 11, 0, 0, 11, 11, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 0, 0, 0, 11, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 0, 0, 0, 11, 11, 11, + 11, 0, 178, 179, 0, 14, 0, 11, + 0, 0, 11, 16, 180, 181, 182, 183, + 57, 184, 185, 55, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 14, 0, 0, + 11, 0, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 0, 
11, 0, + 0, 11, 0, 11, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 0, + 11, 11, 11, 11, 0, 11, 11, 0, + 0, 11, 11, 11, 11, 11, 0, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 201, 206, 207, 208, 209, 38, + 0, 210, 211, 16, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 16, 14, 221, + 222, 223, 224, 16, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 16, 144, 14, 240, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 11, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 11, 11, 0, 0, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 11, 0, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 0, 11, 0, 11, 11, 0, 11, 0, + 11, 11, 0, 11, 0, 11, 0, 241, + 212, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 98, 251, 16, 252, 253, 254, + 16, 255, 129, 256, 257, 258, 259, 260, + 261, 262, 263, 16, 0, 0, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 11, 11, + 0, 11, 11, 11, 0, 11, 0, 0, + 0, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 0, 0, 0, 11, + 16, 181, 264, 265, 14, 16, 14, 0, + 0, 11, 0, 11, 16, 264, 14, 0, + 16, 266, 14, 0, 0, 11, 16, 267, + 268, 269, 172, 270, 271, 16, 272, 273, + 274, 14, 0, 0, 11, 11, 11, 0, + 11, 11, 0, 11, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 0, 11, + 0, 16, 14, 0, 275, 16, 276, 0, + 14, 0, 11, 0, 11, 277, 16, 278, + 279, 0, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 280, 281, 282, 16, 283, + 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 295, 296, 14, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 0, 11, 0, 0, 0, + 0, 0, 0, 11, 11, 11, 11, 11, + 0, 0, 11, 0, 0, 0, 11, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 297, 16, + 298, 16, 299, 300, 301, 302, 14, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 0, + 303, 16, 14, 0, 11, 304, 16, 100, + 14, 0, 11, 305, 0, 14, 0, 11, + 16, 306, 14, 0, 0, 11, 307, 0, + 16, 308, 14, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 0, 11, 0, 0, 0, + 11, 11, 11, 11, 0, 309, 310, 69, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 331, 332, 333, 334, 335, + 336, 330, 0, 11, 11, 11, 11, 0, + 11, 0, 11, 11, 0, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 0, 11, 11, 11, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 0, 337, 338, 339, 101, 102, + 103, 104, 105, 340, 107, 108, 109, 110, + 111, 112, 341, 342, 167, 343, 258, 117, + 344, 119, 229, 269, 122, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 131, + 355, 16, 14, 15, 16, 134, 135, 136, + 137, 14, 14, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 
11, 11, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 11, 0, 0, 11, 11, 11, 0, 0, + 11, 11, 0, 11, 0, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 0, 11, + 11, 0, 11, 11, 11, 0, 356, 140, + 142, 143, 144, 145, 146, 14, 357, 148, + 358, 150, 359, 0, 11, 11, 0, 0, + 0, 0, 11, 0, 0, 11, 11, 11, + 11, 11, 0, 360, 109, 361, 154, 155, + 14, 156, 157, 16, 14, 0, 11, 11, + 11, 11, 0, 0, 0, 11, 16, 159, + 160, 16, 362, 363, 219, 308, 163, 164, + 165, 364, 167, 365, 366, 367, 368, 369, + 370, 371, 372, 373, 374, 175, 176, 15, + 375, 16, 14, 0, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 16, 180, 181, 376, 183, 57, 184, 185, + 55, 186, 187, 377, 14, 190, 378, 192, + 193, 194, 14, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 0, 11, + 0, 379, 380, 197, 198, 199, 381, 201, + 202, 382, 383, 384, 201, 206, 207, 208, + 209, 38, 0, 210, 211, 16, 212, 213, + 215, 385, 217, 386, 219, 220, 16, 14, + 387, 222, 223, 224, 16, 225, 226, 227, + 228, 229, 230, 231, 232, 388, 234, 235, + 389, 237, 238, 239, 16, 144, 14, 240, + 0, 0, 11, 0, 0, 11, 0, 11, + 11, 11, 11, 11, 0, 11, 11, 0, + 390, 391, 392, 393, 394, 395, 396, 397, + 247, 398, 319, 399, 213, 400, 401, 402, + 403, 404, 401, 405, 406, 407, 258, 408, + 260, 409, 410, 271, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 11, 11, 11, 0, 11, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 0, 0, + 0, 11, 11, 11, 0, 11, 11, 0, + 16, 267, 229, 411, 401, 412, 271, 16, + 413, 414, 274, 14, 0, 11, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 277, 16, 278, 415, 0, 11, 11, 0, + 16, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 416, 14, 0, 0, 0, + 11, 16, 417, 16, 265, 300, 301, 302, + 14, 0, 0, 11, 419, 419, 419, 419, + 418, 419, 419, 419, 418, 419, 418, 419, + 419, 418, 418, 418, 418, 418, 418, 419, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 418, 418, 419, 418, 418, 419, 418, + 419, 418, 418, 419, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 419, + 419, 419, 418, 421, 422, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 433, + 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 418, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 418, 418, + 418, 418, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 418, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 418, + 418, 418, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 419, + 418, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, + 468, 469, 470, 471, 
472, 473, 474, 475, + 476, 477, 478, 479, 480, 481, 482, 483, + 484, 485, 486, 487, 488, 425, 489, 490, + 491, 492, 493, 494, 425, 470, 425, 418, + 419, 418, 419, 419, 418, 418, 419, 418, + 418, 418, 418, 419, 418, 418, 418, 418, + 418, 419, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 418, 419, 419, 419, 418, 418, + 418, 419, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 418, 418, 418, + 418, 418, 419, 419, 419, 419, 418, 418, + 419, 419, 419, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 418, 419, 419, 418, + 418, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 419, 419, 419, 418, 418, + 418, 418, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 418, 419, 419, 419, 418, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 495, 496, 497, 498, 499, 500, + 501, 502, 503, 425, 504, 505, 506, 507, + 508, 418, 419, 418, 418, 418, 418, 418, + 419, 419, 418, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 418, 418, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 418, 509, 464, 510, + 511, 512, 425, 513, 514, 470, 425, 418, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 418, + 419, 419, 418, 475, 418, 470, 425, 425, + 515, 418, 425, 418, 419, 470, 516, 517, + 470, 518, 519, 470, 520, 521, 522, 523, + 524, 525, 470, 526, 527, 528, 470, 529, + 530, 531, 489, 532, 533, 534, 489, 535, + 470, 425, 418, 418, 419, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 418, 418, 418, 418, 419, 419, 418, 418, + 419, 419, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 418, 418, 418, 419, 418, + 418, 418, 419, 419, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 418, 418, 418, + 419, 419, 419, 419, 418, 536, 537, 418, + 425, 418, 419, 418, 418, 419, 470, 538, + 539, 540, 541, 520, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 425, 418, 418, 419, 418, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 418, 419, 418, 418, 419, 418, 419, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 419, 418, 418, 419, 419, 419, 419, 418, + 419, 419, 418, 418, 419, 419, 419, 419, + 419, 418, 554, 555, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 560, 566, 567, + 568, 569, 565, 418, 570, 571, 470, 572, + 573, 574, 575, 576, 577, 578, 579, 580, + 470, 425, 581, 582, 583, 584, 470, 585, + 586, 587, 588, 589, 590, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 470, 501, + 425, 600, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 419, 419, 418, + 419, 418, 419, 419, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 419, 419, + 418, 418, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 419, + 418, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 418, 419, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 418, 419, 418, 419, 419, + 418, 419, 418, 419, 419, 418, 419, 418, + 419, 418, 601, 572, 602, 603, 604, 605, + 606, 607, 608, 609, 610, 453, 611, 470, + 612, 613, 614, 470, 615, 485, 616, 617, + 618, 619, 620, 621, 622, 623, 470, 418, + 418, 418, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 419, 
419, 419, 419, 419, 419, 419, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 419, 419, 418, 419, 419, 419, 418, + 419, 418, 418, 418, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 418, + 418, 418, 419, 470, 539, 624, 625, 425, + 470, 425, 418, 418, 419, 418, 419, 470, + 624, 425, 418, 470, 626, 425, 418, 418, + 419, 470, 627, 628, 629, 530, 630, 631, + 470, 632, 633, 634, 425, 418, 418, 419, + 419, 419, 418, 419, 419, 418, 419, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 418, 419, 418, 470, 425, 418, 635, + 470, 636, 418, 425, 418, 419, 418, 419, + 637, 470, 638, 639, 418, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 640, 641, + 642, 470, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 425, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 418, 419, + 418, 418, 418, 418, 418, 418, 419, 419, + 419, 419, 419, 418, 418, 419, 418, 418, + 418, 419, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 657, 470, 658, 470, 659, 660, 661, + 662, 425, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 418, 663, 470, 425, 418, 419, + 664, 470, 455, 425, 418, 419, 665, 418, + 425, 418, 419, 470, 666, 425, 418, 418, + 419, 667, 418, 470, 668, 425, 418, 418, + 419, 670, 669, 419, 419, 419, 419, 670, + 669, 419, 670, 669, 670, 670, 419, 670, + 669, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 670, 670, 670, 670, 669, 419, 419, 670, + 670, 419, 670, 419, 670, 669, 670, 670, + 670, 670, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 670, 419, 670, 419, 670, + 669, 670, 670, 670, 670, 670, 419, 670, + 419, 670, 669, 419, 419, 670, 419, 670, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 670, 419, 670, + 419, 670, 669, 419, 670, 670, 670, 670, + 419, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 419, 670, 669, + 670, 670, 670, 419, 670, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 670, 670, 670, 419, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 670, 669, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 670, 419, 669, 670, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 669, 670, 670, 419, 670, 669, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 670, + 419, 419, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 419, 670, 669, 419, + 670, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 670, 670, + 670, 670, 419, 419, 670, 670, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 669, + 419, 670, 670, 419, 670, 419, 669, 670, + 670, 419, 670, 669, 419, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 670, 419, 670, 419, 669, + 670, 670, 669, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 419, 419, 670, 669, 670, + 419, 669, 670, 669, 419, 670, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 419, + 669, 670, 669, 419, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 419, 670, 419, 669, 
670, 669, + 419, 419, 670, 669, 670, 419, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 419, 670, 669, 669, 419, 669, 419, + 670, 670, 419, 670, 670, 670, 670, 670, + 670, 670, 669, 419, 670, 670, 670, 419, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 670, 669, 419, + 419, 670, 669, 670, 419, 670, 669, 419, + 419, 670, 419, 419, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 669, 419, 670, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 669, 670, 419, 670, 670, 670, 669, + 419, 670, 419, 419, 670, 419, 669, 670, + 670, 669, 419, 670, 670, 670, 670, 419, + 670, 419, 669, 670, 670, 670, 419, 670, + 669, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 669, 670, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 669, 670, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 670, + 670, 670, 669, 419, 419, 419, 670, 669, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 669, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 669, 419, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 670, 670, 419, 670, 419, + 669, 419, 670, 669, 670, 419, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 670, 669, 670, 670, 670, 419, + 670, 419, 670, 419, 670, 419, 669, 670, + 670, 419, 419, 670, 669, 670, 419, 670, + 670, 669, 419, 670, 419, 670, 669, 419, + 419, 670, 670, 670, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 669, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 419, 669, 670, 419, 670, 670, 669, + 419, 670, 670, 419, 669, 670, 669, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 669, 419, 670, 419, 670, 419, + 670, 419, 670, 419, 670, 669, 671, 669, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 674, 683, 684, 685, 686, + 687, 674, 688, 689, 690, 691, 692, 693, + 694, 695, 696, 697, 698, 699, 700, 701, + 702, 674, 703, 671, 683, 671, 704, 671, + 669, 670, 670, 670, 670, 419, 669, 670, + 670, 669, 419, 670, 669, 419, 419, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 419, 670, 419, 669, 670, 670, 669, 419, + 670, 670, 670, 669, 419, 670, 419, 670, + 670, 669, 419, 419, 670, 419, 669, 670, + 669, 419, 670, 669, 419, 419, 670, 419, + 670, 669, 419, 670, 419, 419, 670, 419, + 670, 419, 669, 670, 670, 669, 419, 670, + 670, 419, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 419, 669, 419, 670, 670, + 670, 419, 670, 669, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 669, 705, 706, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 676, 717, 718, 719, 720, 721, 718, + 722, 723, 724, 725, 726, 727, 728, 729, + 730, 671, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 670, 419, 670, 669, + 670, 419, 670, 669, 670, 419, 419, 419, + 670, 669, 670, 419, 670, 669, 670, 670, + 670, 670, 419, 670, 419, 669, 670, 669, + 419, 419, 670, 419, 670, 669, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 669, 419, 670, 669, 670, 419, 670, + 669, 419, 670, 669, 419, 670, 669, 419, + 670, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 674, 731, 671, 674, 732, + 674, 733, 683, 671, 669, 670, 669, 419, + 670, 669, 419, 674, 732, 683, 671, 669, + 674, 734, 671, 683, 671, 669, 670, 669, + 419, 674, 735, 692, 736, 718, 737, 730, + 674, 738, 739, 740, 671, 683, 671, 669, + 670, 669, 419, 
670, 419, 670, 669, 419, + 670, 419, 670, 419, 669, 670, 670, 669, + 419, 670, 419, 670, 669, 419, 670, 669, + 674, 683, 425, 669, 741, 674, 742, 683, + 671, 669, 425, 670, 669, 419, 670, 669, + 419, 743, 674, 744, 745, 671, 669, 419, + 670, 669, 670, 670, 669, 419, 419, 670, + 419, 670, 669, 674, 746, 747, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 671, + 683, 671, 669, 670, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 670, 670, 670, 670, 670, 669, 419, 670, + 670, 419, 670, 419, 669, 670, 419, 670, + 670, 670, 419, 670, 670, 419, 670, 670, + 419, 670, 670, 419, 670, 670, 669, 419, + 674, 757, 674, 733, 758, 759, 760, 671, + 683, 671, 669, 670, 669, 419, 670, 670, + 670, 419, 670, 670, 670, 419, 670, 419, + 670, 669, 419, 419, 419, 419, 670, 670, + 419, 419, 419, 419, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 419, 669, 670, 670, 670, 419, 670, 419, + 670, 669, 683, 425, 761, 674, 683, 425, + 670, 669, 419, 762, 674, 763, 683, 425, + 670, 669, 419, 670, 419, 764, 683, 671, + 669, 425, 670, 669, 419, 674, 765, 671, + 683, 671, 669, 670, 669, 419, 766, 766, + 766, 768, 769, 770, 766, 767, 767, 771, + 768, 771, 769, 771, 767, 772, 773, 772, + 775, 774, 776, 774, 777, 774, 779, 778, + 781, 782, 780, 781, 783, 780, 785, 784, + 786, 784, 787, 784, 789, 788, 791, 792, + 790, 791, 793, 790, 795, 795, 795, 795, + 794, 795, 795, 795, 794, 795, 794, 795, + 795, 794, 794, 794, 794, 794, 794, 795, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 794, 794, 795, 794, 794, 795, 794, + 795, 794, 794, 795, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 795, + 795, 795, 794, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 827, 828, 794, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 794, 794, + 794, 794, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 794, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 794, + 794, 794, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 795, + 794, 829, 830, 831, 832, 833, 834, 835, + 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 858, 859, + 860, 861, 862, 863, 864, 801, 865, 866, + 867, 868, 869, 870, 801, 846, 801, 794, + 795, 794, 795, 795, 794, 794, 795, 794, + 794, 794, 794, 795, 794, 794, 794, 794, + 794, 795, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 794, 795, 795, 795, 794, 794, + 794, 795, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 794, 794, 794, + 
794, 794, 795, 795, 795, 795, 794, 794, + 795, 795, 795, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 794, 795, 795, 794, + 794, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 795, 795, 795, 794, 794, + 794, 794, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 794, 795, 795, 795, 794, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 871, 872, 873, 874, 875, 876, + 877, 878, 879, 801, 880, 881, 882, 883, + 884, 794, 795, 794, 794, 794, 794, 794, + 795, 795, 794, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 794, 794, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 794, 885, 840, 886, + 887, 888, 801, 889, 890, 846, 801, 794, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 794, + 795, 795, 794, 851, 794, 846, 801, 801, + 891, 794, 801, 794, 795, 846, 892, 893, + 846, 894, 895, 846, 896, 897, 898, 899, + 900, 901, 846, 902, 903, 904, 846, 905, + 906, 907, 865, 908, 909, 910, 865, 911, + 846, 801, 794, 794, 795, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 794, 794, 794, 794, 795, 795, 794, 794, + 795, 795, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 794, 794, 794, 795, 794, + 794, 794, 795, 795, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 794, 794, 794, + 795, 795, 795, 795, 794, 912, 913, 794, + 801, 794, 795, 794, 794, 795, 846, 914, + 915, 916, 917, 896, 918, 919, 920, 921, + 922, 923, 924, 925, 926, 927, 928, 929, + 801, 794, 794, 795, 794, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 794, 795, 794, 794, 795, 794, 795, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 795, 794, 794, 795, 795, 795, 795, 794, + 795, 795, 794, 794, 795, 795, 795, 795, + 795, 794, 930, 931, 932, 933, 934, 935, + 936, 937, 938, 939, 940, 936, 942, 943, + 944, 945, 941, 794, 946, 947, 846, 948, + 949, 950, 951, 952, 953, 954, 955, 956, + 846, 801, 957, 958, 959, 960, 846, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 846, 877, + 801, 976, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 795, 795, 794, + 795, 794, 795, 795, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 795, 795, + 794, 794, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 795, + 794, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 794, 795, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 794, 795, 794, 795, 795, + 794, 795, 794, 795, 795, 794, 795, 794, + 795, 794, 977, 948, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 829, 987, 846, + 988, 989, 990, 846, 991, 861, 992, 993, + 994, 995, 996, 997, 998, 999, 846, 794, + 794, 794, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 795, 795, 794, 795, 795, 795, 794, + 795, 794, 794, 794, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 794, + 794, 794, 795, 846, 915, 1000, 1001, 801, + 846, 801, 794, 794, 795, 794, 795, 846, + 1000, 801, 794, 846, 1002, 801, 794, 794, + 795, 846, 1003, 1004, 1005, 906, 1006, 1007, + 846, 1008, 1009, 
1010, 801, 794, 794, 795, + 795, 795, 794, 795, 795, 794, 795, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 794, 795, 794, 846, 801, 794, 1011, + 846, 1012, 794, 801, 794, 795, 794, 795, + 1013, 846, 1014, 1015, 794, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 1016, 1017, + 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024, + 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, + 801, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 794, 795, + 794, 794, 794, 794, 794, 794, 795, 795, + 795, 795, 795, 794, 794, 795, 794, 794, + 794, 795, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 1033, 846, 1034, 846, 1035, 1036, 1037, + 1038, 801, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 794, 1039, 846, 801, 794, 795, + 1040, 846, 831, 801, 794, 795, 1041, 794, + 801, 794, 795, 846, 1042, 801, 794, 794, + 795, 1043, 794, 846, 1044, 801, 794, 794, + 795, 1046, 1045, 795, 795, 795, 795, 1046, + 1045, 795, 1046, 1045, 1046, 1046, 795, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1045, 795, 795, 1046, + 1046, 795, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 1046, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046, + 795, 1046, 1045, 795, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 1046, 795, 1046, + 795, 1046, 1045, 795, 1046, 1046, 1046, 1046, + 795, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1046, 1045, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 795, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1046, 1046, + 1046, 1046, 795, 795, 1046, 1046, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1046, 795, 1046, 795, 1045, + 1046, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1045, 1046, 1045, 795, 1046, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 1045, 1046, 795, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 795, 1046, 1045, 1045, 795, 1045, 795, + 1046, 1046, 795, 1046, 1046, 1046, 1046, 
1046, + 1046, 1046, 1045, 795, 1046, 1046, 1046, 795, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 795, 1046, 795, 795, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1045, 1046, 795, 1046, 1046, 1046, 1045, + 795, 1046, 795, 795, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1046, 1046, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1046, 795, 1046, + 1045, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 1045, 1046, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 1046, + 1046, 1046, 1045, 795, 795, 795, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 1046, 1046, 795, 1046, 795, + 1045, 795, 1046, 1045, 1046, 795, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 1045, 1046, 1046, 1046, 795, + 1046, 795, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 795, 1046, 1045, 1046, 795, 1046, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1046, 1046, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1045, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1045, 1046, 795, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 795, 1046, 1045, 1047, 1045, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062, + 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069, + 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047, + 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1045, 795, 795, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 795, 1046, 795, 1045, 1046, 1046, 1045, 795, + 1046, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1046, 1045, 795, 795, 1046, 795, 1045, 1046, + 1045, 795, 1046, 1045, 795, 795, 1046, 795, + 1046, 1045, 795, 1046, 795, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 795, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1045, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, + 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 1045, 1046, 795, 795, 795, + 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 795, 1046, 1045, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 795, 1046, + 1045, 795, 1046, 1045, 795, 1046, 1045, 795, + 1046, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1050, 
1107, 1047, 1050, 1108, + 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045, + 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045, + 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106, + 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1045, 1046, 1046, 1045, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059, + 1047, 1045, 801, 1046, 1045, 795, 1046, 1045, + 795, 1119, 1050, 1120, 1121, 1047, 1045, 795, + 1046, 1045, 1046, 1046, 1045, 795, 795, 1046, + 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047, + 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 795, 1045, 1046, 795, 1046, + 1046, 1046, 795, 1046, 1046, 795, 1046, 1046, + 795, 1046, 1046, 795, 1046, 1046, 1045, 795, + 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 795, 795, 795, 795, 1046, 1046, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1045, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 1059, 801, 1137, 1050, 1059, 801, + 1046, 1045, 795, 1138, 1050, 1139, 1059, 801, + 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, + 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, + 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151, + 1152, 1153, 1154, 670, 670, 419, 1155, 1156, + 1157, 1158, 670, 1161, 1162, 1164, 1165, 1166, + 1160, 1167, 1168, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, + 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190, + 1191, 1192, 1193, 670, 1148, 7, 1148, 419, + 1148, 419, 1160, 1163, 1187, 1194, 1159, 1142, + 1142, 1195, 1143, 1196, 1198, 1197, 4, 1147, + 1200, 1197, 1201, 1197, 2, 1147, 1197, 6, + 8, 8, 7, 1202, 1203, 1204, 1197, 1205, + 1206, 1197, 1207, 1197, 419, 419, 1209, 1210, + 489, 470, 1211, 470, 1212, 1213, 1214, 1215, + 1216, 1217, 1218, 1219, 1220, 1221, 1222, 544, + 1223, 520, 1224, 1225, 1226, 1227, 1228, 1229, + 1230, 1231, 1232, 1233, 1234, 1235, 419, 419, + 419, 425, 565, 1208, 1236, 1197, 1237, 1197, + 670, 1238, 419, 419, 419, 670, 1238, 670, + 670, 419, 1238, 419, 1238, 419, 1238, 419, + 670, 670, 670, 670, 670, 1238, 419, 670, + 670, 670, 419, 670, 419, 1238, 419, 670, + 670, 670, 670, 419, 1238, 670, 419, 670, + 419, 670, 419, 670, 670, 419, 670, 1238, + 419, 670, 419, 670, 419, 670, 1238, 670, + 419, 1238, 670, 419, 670, 419, 1238, 670, + 670, 670, 670, 670, 1238, 419, 419, 670, + 419, 670, 1238, 670, 419, 1238, 670, 670, + 1238, 419, 419, 670, 419, 670, 419, 670, + 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, + 1246, 1247, 1248, 1249, 715, 1250, 1251, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1260, 1262, 1263, 1264, 1265, 1266, 671, + 1238, 1267, 1268, 1269, 1270, 1271, 1272, 1273, + 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, + 1282, 1283, 1284, 1285, 725, 1286, 1287, 1288, + 692, 1289, 1290, 1291, 1292, 1293, 1294, 671, + 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, + 674, 1303, 671, 674, 1304, 1305, 1306, 1307, + 683, 1238, 1308, 1309, 1310, 1311, 703, 1312, + 1313, 683, 1314, 1315, 1316, 1317, 1318, 671, + 1238, 1319, 1278, 1320, 1321, 1322, 683, 1323, + 1324, 674, 671, 683, 425, 1238, 1288, 671, + 674, 683, 425, 
683, 425, 1325, 683, 1238, + 425, 674, 1326, 1327, 674, 1328, 1329, 681, + 1330, 1331, 1332, 1333, 1334, 1284, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1303, 1347, 674, 683, 425, 1238, + 1348, 1349, 683, 671, 1238, 425, 671, 1238, + 674, 1350, 731, 1351, 1352, 1353, 1354, 1355, + 1356, 1357, 1358, 671, 1359, 1360, 1361, 1362, + 1363, 1364, 671, 683, 1238, 1366, 1367, 1368, + 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, + 1372, 1378, 1379, 1380, 1381, 1365, 1377, 1365, + 1238, 1365, 1238, 1382, 1382, 1383, 1384, 1385, + 1386, 1387, 1388, 1389, 1390, 1387, 767, 1391, + 1391, 1391, 1392, 1391, 1391, 768, 769, 770, + 1391, 767, 1382, 1382, 1393, 1396, 1397, 1395, + 1398, 1399, 1398, 1400, 1391, 1402, 1401, 1396, + 1403, 1395, 1405, 1404, 1394, 1394, 1394, 768, + 769, 770, 1394, 767, 767, 1406, 773, 1406, + 1407, 1406, 775, 1408, 1409, 1410, 1411, 1412, + 1413, 1414, 1411, 776, 775, 1408, 1415, 1415, + 777, 779, 1416, 1415, 776, 1418, 1419, 1417, + 1418, 1419, 1420, 1417, 775, 1408, 1421, 1415, + 775, 1408, 1415, 1423, 1422, 1425, 1424, 776, + 1426, 777, 1426, 779, 1426, 785, 1427, 1428, + 1429, 1430, 1431, 1432, 1433, 1430, 786, 785, + 1427, 1434, 1434, 787, 789, 1435, 1434, 786, + 1437, 1438, 1436, 1437, 1438, 1439, 1436, 785, + 1427, 1440, 1434, 785, 1427, 1434, 1442, 1441, + 1444, 1443, 786, 1445, 787, 1445, 789, 1445, + 795, 1448, 1449, 1451, 1452, 1453, 1447, 1454, + 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, + 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, + 1471, 1472, 1473, 1475, 1476, 1477, 1478, 1479, + 1480, 795, 795, 1446, 1447, 1450, 1474, 1481, + 1446, 1046, 795, 795, 1483, 1484, 865, 846, + 1485, 846, 1486, 1487, 1488, 1489, 1490, 1491, + 1492, 1493, 1494, 1495, 1496, 920, 1497, 896, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1509, 795, 795, 795, 801, + 941, 1482, 1046, 1510, 795, 795, 795, 1046, + 1510, 1046, 1046, 795, 1510, 795, 1510, 795, + 1510, 795, 1046, 1046, 1046, 1046, 1046, 1510, + 795, 1046, 1046, 1046, 795, 1046, 795, 1510, + 795, 1046, 1046, 1046, 1046, 795, 1510, 1046, + 795, 1046, 795, 1046, 795, 1046, 1046, 795, + 1046, 1510, 795, 1046, 795, 1046, 795, 1046, + 1510, 1046, 795, 1510, 1046, 795, 1046, 795, + 1510, 1046, 1046, 1046, 1046, 1046, 1510, 795, + 795, 1046, 795, 1046, 1510, 1046, 795, 1510, + 1046, 1046, 1510, 795, 795, 1046, 795, 1046, + 795, 1046, 1510, 1511, 1512, 1513, 1514, 1515, + 1516, 1517, 1518, 1519, 1520, 1521, 1091, 1522, + 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, + 1531, 1532, 1533, 1532, 1534, 1535, 1536, 1537, + 1538, 1047, 1510, 1539, 1540, 1541, 1542, 1543, + 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, + 1552, 1553, 1554, 1555, 1556, 1557, 1101, 1558, + 1559, 1560, 1068, 1561, 1562, 1563, 1564, 1565, + 1566, 1047, 1567, 1568, 1569, 1570, 1571, 1572, + 1573, 1574, 1050, 1575, 1047, 1050, 1576, 1577, + 1578, 1579, 1059, 1510, 1580, 1581, 1582, 1583, + 1079, 1584, 1585, 1059, 1586, 1587, 1588, 1589, + 1590, 1047, 1510, 1591, 1550, 1592, 1593, 1594, + 1059, 1595, 1596, 1050, 1047, 1059, 801, 1510, + 1560, 1047, 1050, 1059, 801, 1059, 801, 1597, + 1059, 1510, 801, 1050, 1598, 1599, 1050, 1600, + 1601, 1057, 1602, 1603, 1604, 1605, 1606, 1556, + 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, + 1615, 1616, 1617, 1618, 1575, 1619, 1050, 1059, + 801, 1510, 1620, 1621, 1059, 1047, 1510, 801, + 1047, 1510, 1050, 1622, 1107, 1623, 1624, 1625, + 1626, 1627, 1628, 1629, 1630, 1047, 1631, 1632, + 1633, 1634, 1635, 1636, 1047, 1059, 1510, 1638, + 1639, 1640, 1641, 1642, 
1643, 1644, 1645, 1646, + 1647, 1648, 1644, 1650, 1651, 1652, 1653, 1637, + 1649, 1637, 1510, 1637, 1510, +} + +var _hcltok_trans_targs []int16 = []int16{ + 1459, 1459, 2, 3, 1459, 1459, 4, 1467, + 5, 6, 8, 9, 286, 12, 13, 14, + 15, 16, 287, 288, 19, 289, 21, 22, + 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299, 328, 348, 353, 127, 128, 129, + 356, 151, 371, 375, 1459, 10, 11, 17, + 18, 20, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 64, 105, 120, 131, + 154, 170, 283, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, + 121, 122, 123, 124, 125, 126, 130, 132, + 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 152, 153, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 171, 203, 227, 230, 231, + 233, 242, 243, 246, 250, 268, 275, 277, + 279, 281, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, + 202, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, + 228, 229, 232, 234, 235, 236, 237, 238, + 239, 240, 241, 244, 245, 247, 248, 249, + 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, + 267, 269, 270, 271, 272, 273, 274, 276, + 278, 280, 282, 284, 285, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 329, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 349, 350, 351, 352, + 354, 355, 357, 358, 359, 360, 361, 362, + 363, 364, 365, 366, 367, 368, 369, 370, + 372, 373, 374, 376, 382, 404, 409, 411, + 413, 377, 378, 379, 380, 381, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, + 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 405, 406, 407, 408, 410, + 412, 414, 1459, 1471, 1459, 437, 438, 439, + 440, 417, 441, 442, 443, 444, 445, 446, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, + 463, 464, 465, 466, 467, 469, 470, 471, + 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 419, 486, + 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 418, 504, 505, 506, 507, 508, 510, + 511, 512, 513, 514, 515, 516, 517, 518, + 519, 520, 521, 522, 523, 525, 526, 527, + 528, 529, 530, 534, 536, 537, 538, 539, + 434, 540, 541, 542, 543, 544, 545, 546, + 547, 548, 549, 550, 551, 552, 553, 554, + 556, 557, 559, 560, 561, 562, 563, 564, + 432, 565, 566, 567, 568, 569, 570, 571, + 572, 573, 575, 607, 631, 634, 635, 637, + 646, 647, 650, 654, 672, 532, 679, 681, + 683, 685, 576, 577, 578, 579, 580, 581, + 582, 583, 584, 585, 586, 587, 588, 589, + 590, 591, 592, 593, 594, 595, 596, 597, + 598, 599, 600, 601, 602, 603, 604, 605, + 606, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, + 623, 624, 625, 626, 627, 628, 629, 630, + 632, 633, 636, 638, 639, 640, 641, 642, + 643, 644, 645, 648, 649, 651, 652, 653, + 655, 656, 657, 658, 659, 660, 661, 662, + 663, 664, 665, 666, 667, 668, 669, 670, + 671, 673, 674, 675, 676, 677, 678, 680, + 682, 
684, 686, 688, 689, 1459, 1459, 690, + 827, 828, 759, 829, 830, 831, 832, 833, + 834, 788, 835, 724, 836, 837, 838, 839, + 840, 841, 842, 843, 744, 844, 845, 846, + 847, 848, 849, 850, 851, 852, 853, 769, + 854, 856, 857, 858, 859, 860, 861, 862, + 863, 864, 865, 702, 866, 867, 868, 869, + 870, 871, 872, 873, 874, 740, 875, 876, + 877, 878, 879, 810, 881, 882, 885, 887, + 888, 889, 890, 891, 892, 895, 896, 898, + 899, 900, 902, 903, 904, 905, 906, 907, + 908, 909, 910, 911, 912, 914, 915, 916, + 917, 920, 922, 923, 925, 927, 1509, 1510, + 929, 930, 931, 1509, 1509, 932, 1523, 1523, + 1524, 935, 1523, 936, 1525, 1526, 1529, 1530, + 1534, 1534, 1535, 941, 1534, 942, 1536, 1537, + 1540, 1541, 1545, 1546, 1545, 968, 969, 970, + 971, 948, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, + 1011, 1012, 1013, 1014, 1015, 1016, 950, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, + 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, + 1034, 949, 1035, 1036, 1037, 1038, 1039, 1041, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058, + 1059, 1060, 1061, 1065, 1067, 1068, 1069, 1070, + 965, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1087, 1088, 1090, 1091, 1092, 1093, 1094, 1095, + 963, 1096, 1097, 1098, 1099, 1100, 1101, 1102, + 1103, 1104, 1106, 1138, 1162, 1165, 1166, 1168, + 1177, 1178, 1181, 1185, 1203, 1063, 1210, 1212, + 1214, 1216, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, + 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, + 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, + 1137, 1139, 1140, 1141, 1142, 1143, 1144, 1145, + 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, + 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, + 1163, 1164, 1167, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1179, 1180, 1182, 1183, 1184, + 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, + 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, + 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211, + 1213, 1215, 1217, 1219, 1220, 1545, 1545, 1221, + 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364, + 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370, + 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377, + 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1300, + 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, + 1394, 1395, 1396, 1233, 1397, 1398, 1399, 1400, + 1401, 1402, 1403, 1404, 1405, 1271, 1406, 1407, + 1408, 1409, 1410, 1341, 1412, 1413, 1416, 1418, + 1419, 1420, 1421, 1422, 1423, 1426, 1427, 1429, + 1430, 1431, 1433, 1434, 1435, 1436, 1437, 1438, + 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447, + 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459, + 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466, + 1468, 1469, 1470, 1459, 1472, 1459, 1473, 1459, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1459, 1459, 1459, 1459, 1459, + 1459, 1, 1459, 7, 1459, 1459, 1459, 1459, + 1459, 415, 416, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 433, + 435, 436, 468, 509, 524, 531, 533, 535, + 555, 558, 574, 687, 1459, 1459, 1459, 691, + 692, 693, 694, 695, 696, 697, 698, 699, + 700, 701, 703, 704, 705, 706, 707, 708, + 709, 710, 711, 712, 713, 714, 715, 716, + 717, 718, 
719, 720, 721, 722, 723, 725, + 726, 727, 728, 729, 730, 731, 732, 733, + 734, 735, 736, 737, 738, 739, 741, 742, + 743, 745, 746, 747, 748, 749, 750, 751, + 752, 753, 754, 755, 756, 757, 758, 760, + 761, 762, 763, 764, 765, 766, 767, 768, + 770, 771, 772, 773, 774, 775, 776, 777, + 778, 779, 780, 781, 782, 783, 784, 785, + 786, 787, 789, 790, 791, 792, 793, 794, + 795, 796, 797, 798, 799, 800, 801, 802, + 803, 804, 805, 806, 807, 808, 809, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 822, 823, 824, 825, 826, 855, + 880, 883, 884, 886, 893, 894, 897, 901, + 913, 918, 919, 921, 924, 926, 1511, 1509, + 1512, 1517, 1519, 1509, 1520, 1521, 1522, 1509, + 928, 1509, 1509, 1513, 1514, 1516, 1509, 1515, + 1509, 1509, 1509, 1518, 1509, 1509, 1509, 933, + 934, 938, 939, 1523, 1531, 1532, 1533, 1523, + 937, 1523, 1523, 934, 1527, 1528, 1523, 1523, + 1523, 1523, 1523, 940, 944, 945, 1534, 1542, + 1543, 1544, 1534, 943, 1534, 1534, 940, 1538, + 1539, 1534, 1534, 1534, 1534, 1534, 1545, 1547, + 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, + 1580, 1581, 1545, 946, 947, 951, 952, 953, + 954, 955, 956, 957, 958, 959, 960, 961, + 962, 964, 966, 967, 999, 1040, 1055, 1062, + 1064, 1066, 1086, 1089, 1105, 1218, 1545, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1234, 1235, 1236, 1237, 1238, 1239, + 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, + 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1256, + 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, + 1265, 1266, 1267, 1268, 1269, 1270, 1272, 1273, + 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282, + 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1291, + 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, + 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, + 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, + 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, + 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, + 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1386, + 1411, 1414, 1415, 1417, 1424, 1425, 1428, 1432, + 1444, 1449, 1450, 1452, 1455, 1457, +} + +var _hcltok_trans_actions []byte = []byte{ + 145, 107, 0, 0, 91, 141, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 121, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 143, 193, 149, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 147, 125, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 31, 169, + 0, 0, 0, 35, 33, 0, 55, 41, + 175, 0, 53, 0, 175, 175, 0, 0, + 75, 61, 181, 0, 73, 0, 181, 181, + 0, 0, 85, 187, 89, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 87, 79, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 93, + 0, 0, 119, 0, 111, 0, 7, 7, + 7, 0, 0, 113, 0, 115, 0, 123, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 7, 7, + 7, 196, 196, 196, 196, 196, 196, 7, + 7, 196, 7, 127, 139, 135, 97, 133, + 103, 0, 129, 0, 101, 95, 109, 99, + 131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 105, 117, 137, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 13, + 0, 0, 172, 17, 0, 7, 7, 23, + 0, 25, 27, 0, 0, 0, 151, 0, + 15, 19, 9, 0, 21, 11, 29, 0, + 0, 0, 0, 43, 0, 178, 178, 49, + 0, 157, 154, 1, 175, 175, 45, 37, + 47, 39, 51, 0, 0, 0, 
63, 0, + 184, 184, 69, 0, 163, 160, 1, 181, + 181, 65, 57, 67, 59, 71, 77, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 7, 7, 7, + 190, 190, 190, 190, 190, 190, 7, 7, + 190, 7, 81, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 83, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 166, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 166, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_eof_trans []int16 = []int16{ + 0, 1, 1, 1, 6, 6, 6, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 419, + 419, 421, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 
670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 767, 772, 772, 772, 773, 773, 775, 775, + 775, 779, 0, 0, 785, 785, 785, 789, + 0, 0, 795, 795, 797, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 
1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 0, 1196, 1197, 1198, 1200, + 1198, 1198, 1198, 1203, 1198, 1198, 1198, 1209, + 1198, 1198, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 0, 1392, 1394, + 1395, 1399, 1399, 1392, 1402, 1395, 1405, 1395, + 1407, 1407, 1407, 0, 1416, 1418, 1418, 1416, + 1416, 1423, 1425, 1427, 1427, 1427, 0, 1435, + 1437, 1437, 1435, 1435, 1442, 1444, 1446, 1446, + 1446, 0, 1483, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, +} + +const hcltok_start int = 1459 +const hcltok_first_final int = 1459 +const hcltok_error int = 0 + +const hcltok_en_stringTemplate int = 1509 +const hcltok_en_heredocTemplate int = 1523 +const hcltok_en_bareTemplate int = 1534 +const hcltok_en_identOnly int = 1545 +const hcltok_en_main int = 1459 + +//line scan_tokens.rl:16 + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, + } + +//line scan_tokens.rl:305 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + +//line scan_tokens.rl:340 + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + +//line scan_tokens.go:4289 + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + +//line scan_tokens.go:4297 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_hcltok_from_state_actions[cs]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 3: +//line NONE:1 + ts = p + +//line scan_tokens.go:4320 + } + } + + _keys = int(_hcltok_key_offsets[cs]) + _trans = int(_hcltok_index_offsets[cs]) + + _klen = int(_hcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen 
- 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hcltok_indicies[_trans]) + _eof_trans: + cs = int(_hcltok_trans_targs[_trans]) + + if _hcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hcltok_trans_actions[_trans]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 0: +//line scan_tokens.rl:224 + p-- + + case 4: +//line NONE:1 + te = p + 1 + + case 5: +//line scan_tokens.rl:248 + act = 4 + case 6: +//line scan_tokens.rl:250 + act = 6 + case 7: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 8: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 9: +//line scan_tokens.rl:84 + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 10: +//line scan_tokens.rl:248 + te = p + 1 + { + token(TokenQuotedLit) + } + case 11: +//line scan_tokens.rl:251 + te = p + 1 + { + token(TokenBadUTF8) + } + case 12: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 13: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 14: +//line scan_tokens.rl:248 + te = p + p-- + { + token(TokenQuotedLit) + } + case 15: +//line scan_tokens.rl:249 + te = p + p-- + { + token(TokenQuotedNewline) + } + case 16: +//line scan_tokens.rl:250 + te = p + p-- + { + token(TokenInvalid) + } + case 17: +//line scan_tokens.rl:251 + te = p + p-- + { + token(TokenBadUTF8) + } + case 18: +//line scan_tokens.rl:248 + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 19: +//line scan_tokens.rl:251 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 20: +//line NONE:1 + switch act { + case 4: + { + p = (te) - 1 + token(TokenQuotedLit) + } + case 6: + { + p = (te) - 1 + token(TokenInvalid) + } + } + + case 21: +//line 
scan_tokens.rl:148 + act = 11 + case 22: +//line scan_tokens.rl:259 + act = 12 + case 23: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 24: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 25: +//line scan_tokens.rl:111 + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. + nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } + token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 26: +//line scan_tokens.rl:259 + te = p + 1 + { + token(TokenBadUTF8) + } + case 27: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 28: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 29: +//line scan_tokens.rl:148 + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 30: +//line scan_tokens.rl:259 + te = p + p-- + { + token(TokenBadUTF8) + } + case 31: +//line scan_tokens.rl:148 + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 32: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 11: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. 
+ heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 12: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 33: +//line scan_tokens.rl:156 + act = 15 + case 34: +//line scan_tokens.rl:266 + act = 16 + case 35: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 36: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 37: +//line scan_tokens.rl:156 + te = p + 1 + { + token(TokenStringLit) + } + case 38: +//line scan_tokens.rl:266 + te = p + 1 + { + token(TokenBadUTF8) + } + case 39: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 40: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 41: +//line scan_tokens.rl:156 + te = p + p-- + { + token(TokenStringLit) + } + case 42: +//line scan_tokens.rl:266 + te = p + p-- + { + token(TokenBadUTF8) + } + case 43: +//line scan_tokens.rl:156 + p = (te) - 1 + { + token(TokenStringLit) + } + case 44: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 15: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 16: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 45: +//line scan_tokens.rl:270 + act = 17 + case 46: +//line scan_tokens.rl:271 + act = 18 + case 47: +//line scan_tokens.rl:271 + te = p + 1 + { + token(TokenBadUTF8) + } + case 48: +//line scan_tokens.rl:272 + te = p + 1 + { + token(TokenInvalid) + } + case 49: +//line scan_tokens.rl:270 + te = p + p-- + { + token(TokenIdent) + } + case 50: +//line scan_tokens.rl:271 + te = p + p-- + { + token(TokenBadUTF8) + } + case 51: +//line scan_tokens.rl:270 + p = (te) - 1 + { + token(TokenIdent) + } + case 52: +//line scan_tokens.rl:271 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 53: +//line NONE:1 + switch act { + case 17: + { + p = (te) - 1 + token(TokenIdent) + } + case 18: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 54: +//line scan_tokens.rl:278 + act = 22 + case 55: +//line scan_tokens.rl:301 + act = 39 + case 56: +//line scan_tokens.rl:280 + te = p + 1 + { + token(TokenComment) + } + case 57: +//line scan_tokens.rl:281 + te = p + 1 + { + token(TokenNewline) + } + case 58: +//line scan_tokens.rl:283 + te = p + 1 + { + token(TokenEqualOp) + } + case 59: +//line scan_tokens.rl:284 + te = p + 1 + { + token(TokenNotEqual) + } + case 60: +//line scan_tokens.rl:285 + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 61: +//line scan_tokens.rl:286 + te = p + 1 + { + token(TokenLessThanEq) + } + case 62: +//line scan_tokens.rl:287 + te = p + 1 + { + token(TokenAnd) + } + case 63: +//line scan_tokens.rl:288 + te = p + 1 + { + 
token(TokenOr) + } + case 64: +//line scan_tokens.rl:289 + te = p + 1 + { + token(TokenEllipsis) + } + case 65: +//line scan_tokens.rl:290 + te = p + 1 + { + token(TokenFatArrow) + } + case 66: +//line scan_tokens.rl:291 + te = p + 1 + { + selfToken() + } + case 67: +//line scan_tokens.rl:180 + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 68: +//line scan_tokens.rl:185 + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 69: +//line scan_tokens.rl:197 + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 70: +//line scan_tokens.rl:79 + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1509 + goto _again + } + } + case 71: +//line scan_tokens.rl:89 + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 1: +//line NONE:1 + ts = 0 + + case 2: +//line NONE:1 + act = 0 + +//line scan_tokens.go:5073 + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _hcltok_eof_trans[cs] > 0 { + _trans = int(_hcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line scan_tokens.rl:363 + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
+ f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl new file mode 100644 index 000000000..942ad92ba --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl @@ -0,0 +1,395 @@ + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl/v2" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. +%%{ + # (except when you are actually in scan_tokens.rl here, so edit away!) + + machine hcltok; + write data; +}%% + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, + } + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + BrokenUTF8 = any - AnyUTF8; + + NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit); + NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.'))); + Ident = (ID_Start | '_') (ID_Continue | '-')*; + + # Symbols that just represent themselves are handled as a single rule. + SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'"; + + EqualOp = "=="; + NotEqual = "!="; + GreaterThanEqual = ">="; + LessThanEqual = "<="; + LogicalAnd = "&&"; + LogicalOr = "||"; + + Ellipsis = "..."; + FatArrow = "=>"; + + Newline = '\r' ? '\n'; + EndOfLine = Newline; + + BeginStringTmpl = '"'; + BeginHeredocTmpl = '<<' ('-')? Ident Newline; + + Comment = ( + # The :>> operator in these is a "finish-guarded concatenation", + # which terminates the sequence on its left when it completes + # the sequence on its right. + # In the single-line comment cases this is allowing us to make + # the trailing EndOfLine optional while still having the overall + # pattern terminate. In the multi-line case it ensures that + # the first comment in the file ends at the first */, rather than + # gobbling up all of the "any*" until the _final_ */ in the file. + ("#" (any - EndOfLine)* :>> EndOfLine?) | + ("//" (any - EndOfLine)* :>> EndOfLine?) | + ("/*" any* :>> "*/") + ); + + # Note: hclwrite assumes that only ASCII spaces appear between tokens, + # and uses this assumption to recreate the spaces between tokens by + # looking at byte offset differences. This means it will produce + # incorrect results in the presence of tabs, but that's acceptable + # because the canonical style (which hclwrite itself can impose + # automatically is to never use tabs). 
+ Spaces = (' ' | 0x09)+; + + action beginStringTemplate { + token(TokenOQuote); + fcall stringTemplate; + } + + action endStringTemplate { + token(TokenCQuote); + fret; + } + + action beginHeredocTemplate { + token(TokenOHeredoc); + // the token is currently the whole heredoc introducer, like + // < 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action beginTemplateControl { + token(TokenTemplateControl); + braces++; + retBraces = append(retBraces, braces); + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action openBrace { + token(TokenOBrace); + braces++; + } + + action closeBrace { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + token(TokenCBrace); + braces--; + } + } + + action closeTemplateSeqEatWhitespace { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd); + braces--; + } + } + + TemplateInterp = "${" ("~")?; + TemplateControl = "%{" ("~")?; + EndStringTmpl = '"'; + NewlineChars = ("\r"|"\n"); + NewlineCharsSeq = NewlineChars+; + StringLiteralChars = (AnyUTF8 - NewlineChars); + TemplateIgnoredNonBrace = (^'{' %{ fhold; }); + TemplateNotInterp = '$' (TemplateIgnoredNonBrace | TemplateInterp); + TemplateNotControl = '%' (TemplateIgnoredNonBrace | TemplateControl); + QuotedStringLiteralWithEsc = ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"' | "\\")); + TemplateStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (QuotedStringLiteralWithEsc)+ + ); + HeredocStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (StringLiteralChars - ("$" | '%'))* + ); + BareStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (StringLiteralChars - ("$" | '%'))* + ) Newline?; + + stringTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + EndStringTmpl => endStringTemplate; + TemplateStringLiteral => { token(TokenQuotedLit); }; + NewlineCharsSeq => { token(TokenQuotedNewline); }; + AnyUTF8 => { token(TokenInvalid); }; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + heredocTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + HeredocStringLiteral EndOfLine => heredocLiteralEOL; + HeredocStringLiteral => heredocLiteralMidline; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + bareTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + BareStringLiteral => bareTemplateLiteral; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + identOnly := |* + Ident => { token(TokenIdent) }; + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + main := |* + Spaces => {}; + NumberLit => { token(TokenNumberLit) }; + Ident => { 
token(TokenIdent) }; + + Comment => { token(TokenComment) }; + Newline => { token(TokenNewline) }; + + EqualOp => { token(TokenEqualOp); }; + NotEqual => { token(TokenNotEqual); }; + GreaterThanEqual => { token(TokenGreaterThanEq); }; + LessThanEqual => { token(TokenLessThanEq); }; + LogicalAnd => { token(TokenAnd); }; + LogicalOr => { token(TokenOr); }; + Ellipsis => { token(TokenEllipsis); }; + FatArrow => { token(TokenFatArrow); }; + SelfToken => { selfToken() }; + + "{" => openBrace; + "}" => closeBrace; + + "~}" => closeTemplateSeqEatWhitespace; + + BeginStringTmpl => beginStringTemplate; + BeginHeredocTmpl => beginHeredocTemplate; + + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + %%{ + prepush { + stack = append(stack, 0); + } + postpop { + stack = stack[:len(stack)-1]; + } + }%% + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func (ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func () { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + %%{ + write init nocs; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. + f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md new file mode 100644 index 000000000..3fc5f5f1b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md @@ -0,0 +1,941 @@ +# HCL Native Syntax Specification + +This is the specification of the syntax and semantics of the native syntax +for HCL. HCL is a system for defining configuration languages for applications. +The HCL information model is designed to support multiple concrete syntaxes +for configuration, but this native syntax is considered the primary format +and is optimized for human authoring and maintenance, as opposed to machine +generation of configuration. 
+ +The language consists of three integrated sub-languages: + +- The _structural_ language defines the overall hierarchical configuration + structure, and is a serialization of HCL bodies, blocks and attributes. + +- The _expression_ language is used to express attribute values, either as + literals or as derivations of other values. + +- The _template_ language is used to compose values together into strings, + as one of several types of expression in the expression language. + +In normal use these three sub-languages are used together within configuration +files to describe an overall configuration, with the structural language +being used at the top level. The expression and template languages can also +be used in isolation, to implement features such as REPLs, debuggers, and +integration into more limited HCL syntaxes such as the JSON profile. + +## Syntax Notation + +Within this specification a semi-formal notation is used to illustrate the +details of syntax. This notation is intended for human consumption rather +than machine consumption, with the following conventions: + +- A naked name starting with an uppercase letter is a global production, + common to all of the syntax specifications in this document. +- A naked name starting with a lowercase letter is a local production, + meaningful only within the specification where it is defined. +- Double and single quotes (`"` and `'`) are used to mark literal character + sequences, which may be either punctuation markers or keywords. +- The default operator for combining items, which has no punctuation, + is concatenation. +- The symbol `|` indicates that any one of its left and right operands may + be present. +- The `*` symbol indicates zero or more repetitions of the item to its left. +- The `?` symbol indicates zero or one of the item to its left. +- Parentheses (`(` and `)`) are used to group items together to apply + the `|`, `*` and `?` operators to them collectively. + +The grammar notation does not fully describe the language. The prose may +augment or conflict with the illustrated grammar. In case of conflict, prose +has priority. + +## Source Code Representation + +Source code is unicode text expressed in the UTF-8 encoding. The language +itself does not perform unicode normalization, so syntax features such as +identifiers are sequences of unicode code points and so e.g. a precombined +accented character is distinct from a letter associated with a combining +accent. (String literals have some special handling with regard to Unicode +normalization which will be covered later in the relevant section.) + +UTF-8 encoded Unicode byte order marks are not permitted. Invalid or +non-normalized UTF-8 encoding is always a parse error. + +## Lexical Elements + +### Comments and Whitespace + +Comments and Whitespace are recognized as lexical elements but are ignored +except as described below. + +Whitespace is defined as a sequence of zero or more space characters +(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A) +are _not_ considered whitespace but are ignored as such in certain contexts. +Horizontal tab characters (U+0009) are also treated as whitespace, but are +counted only as one "column" for the purpose of reporting source positions. + +Comments serve as program documentation and come in two forms: + +- _Line comments_ start with either the `//` or `#` sequences and end with + the next newline sequence. A line comment is considered equivalent to a + newline sequence. 
+ +- _Inline comments_ start with the `/*` sequence and end with the `*/` + sequence, and may have any characters within except the ending sequence. + An inline comment is considered equivalent to a whitespace sequence. + +Comments and whitespace cannot begin within other comments, or within +template literals except inside an interpolation sequence or template directive. + +### Identifiers + +Identifiers name entities such as blocks, attributes and expression variables. +Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically, +their syntax is defined in terms of the `ID_Start` and `ID_Continue` +character properties as follows: + +```ebnf +Identifier = ID_Start (ID_Continue | '-')*; +``` + +The Unicode specification provides the normative requirements for identifier +parsing. Non-normatively, the spirit of this specification is that `ID_Start` +consists of Unicode letters and certain unambiguous punctuation tokens, while +`ID_Continue` augments that set with Unicode digits, combining marks, etc. + +The dash character `-` is additionally allowed in identifiers, even though +that is not part of the unicode `ID_Continue` definition. This is to allow +attribute names and block type names to contain dashes, although underscores +as word separators are considered the idiomatic usage. + +[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax" + +### Keywords + +There are no globally-reserved words, but in some contexts certain identifiers +are reserved to function as keywords. These are discussed further in the +relevant documentation sections that follow. In such situations, the +identifier's role as a keyword supersedes any other valid interpretation that +may be possible. Outside of these specific situations, the keywords have no +special meaning and are interpreted as regular identifiers. + +### Operators and Delimiters + +The following character sequences represent operators, delimiters, and other +special tokens: + +``` ++ && == < : { [ ( ${ +- || != > ? } ] ) %{ +* ! <= = . +/ >= => , +% ... +``` + +### Numeric Literals + +A numeric literal is a decimal representation of a +real number. It has an integer part, a fractional part, +and an exponent part. + +```ebnf +NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?; +decimal = '0' .. '9'; +expmark = ('e' | 'E') ("+" | "-")?; +``` + +## Structural Elements + +The structural language consists of syntax representing the following +constructs: + +- _Attributes_, which assign a value to a specified name. +- _Blocks_, which create a child body annotated by a type and optional labels. +- _Body Content_, which consists of a collection of attributes and blocks. + +These constructs correspond to the similarly-named concepts in the +language-agnostic HCL information model. + +```ebnf +ConfigFile = Body; +Body = (Attribute | Block | OneLineBlock)*; +Attribute = Identifier "=" Expression Newline; +Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline; +OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline; +``` + +### Configuration Files + +A _configuration file_ is a sequence of characters whose top-level is +interpreted as a Body. + +### Bodies + +A _body_ is a collection of associated attributes and blocks. The meaning of +this association is defined by the calling application. + +### Attribute Definitions + +An _attribute definition_ assigns a value to a particular attribute name within +a body.
Each distinct attribute name may be defined no more than once within a +single body. + +The attribute value is given as an expression, which is retained literally +for later evaluation by the calling application. + +### Blocks + +A _block_ creates a child body that is annotated with a block _type_ and +zero or more block _labels_. Blocks create a structural hierarchy which can be +interpreted by the calling application. + +Block labels can either be quoted literal strings or naked identifiers. + +## Expressions + +The expression sub-language is used within attribute definitions to specify +values. + +```ebnf +Expression = ( + ExprTerm | + Operation | + Conditional +); +``` + +### Types + +The value types used within the expression language are those defined by the +syntax-agnostic HCL information model. An expression may return any valid +type, but only a subset of the available types have first-class syntax. +A calling application may make other types available via _variables_ and +_functions_. + +### Expression Terms + +Expression _terms_ are the operands for unary and binary expressions, as well +as acting as expressions in their own right. + +```ebnf +ExprTerm = ( + LiteralValue | + CollectionValue | + TemplateExpr | + VariableExpr | + FunctionCall | + ForExpr | + ExprTerm Index | + ExprTerm GetAttr | + ExprTerm Splat | + "(" Expression ")" +); +``` + +The productions for these different term types are given in their corresponding +sections. + +Between the `(` and `)` characters denoting a sub-expression, newline +characters are ignored as whitespace. + +### Literal Values + +A _literal value_ immediately represents a particular value of a primitive +type. + +```ebnf +LiteralValue = ( + NumericLit | + "true" | + "false" | + "null" +); +``` + +- Numeric literals represent values of type _number_. +- The `true` and `false` keywords represent values of type _bool_. +- The `null` keyword represents a null value of the dynamic pseudo-type. + +String literals are not directly available in the expression sub-language, but +are available via the template sub-language, which can in turn be incorporated +via _template expressions_. + +### Collection Values + +A _collection value_ combines zero or more other expressions to produce a +collection value. + +```ebnf +CollectionValue = tuple | object; +tuple = "[" ( + (Expression ("," Expression)* ","?)? +) "]"; +object = "{" ( + (objectelem ("," objectelem)* ","?)? +) "}"; +objectelem = (Identifier | Expression) "=" Expression; +``` + +Only tuple and object values can be directly constructed via native syntax. +Tuple and object values can in turn be converted to list, set and map values +with other operations, which behaves as defined by the syntax-agnostic HCL +information model. + +When specifying an object element, an identifier is interpreted as a literal +attribute name as opposed to a variable reference. To populate an item key +from a variable, use parentheses to disambiguate: + +- `{foo = "baz"}` is interpreted as an attribute literally named `foo`. +- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken + from the variable named `foo`. + +Between the open and closing delimiters of these sequences, newline sequences +are ignored as whitespace. + +There is a syntax ambiguity between _for expressions_ and collection values +whose first element is a reference to a variable named `for`. 
The +_for expression_ interpretation has priority, so to produce a tuple whose +first element is the value of a variable named `for`, or an object with a +key named `for`, use parentheses to disambiguate: + +- `[for, foo, baz]` is a syntax error. +- `[(for), foo, baz]` is a tuple whose first element is the value of variable + `for`. +- `{for: 1, baz: 2}` is a syntax error. +- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. +- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the + ambiguity by reordering. + +### Template Expressions + +A _template expression_ embeds a program written in the template sub-language +as an expression. Template expressions come in two forms: + +- A _quoted_ template expression is delimited by quote characters (`"`) and + defines a template as a single-line expression with escape characters. +- A _heredoc_ template expression is introduced by a `<<` sequence and + defines a template via a multi-line sequence terminated by a user-chosen + delimiter. + +In both cases the template interpolation and directive syntax is available for +use within the delimiters, and any text outside of these special sequences is +interpreted as a literal string. + +In _quoted_ template expressions any literal string sequences within the +template behave in a special way: literal newline sequences are not permitted +and instead _escape sequences_ can be included, starting with the +backslash `\`: + +``` + \n Unicode newline control character + \r Unicode carriage return control character + \t Unicode tab control character + \" Literal quote mark, used to prevent interpretation as end of string + \\ Literal backslash, used to prevent interpretation as escape sequence + \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits) + \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) +``` + +The _heredoc_ template expression type is introduced by either `<<` or `<<-`, +followed by an identifier. The template expression ends when the given +identifier subsequently appears again on a line of its own. + +If a heredoc template is introduced with the `<<-` symbol, any literal string +at the start of each line is analyzed to find the minimum number of leading +spaces, and then that number of prefix spaces is removed from all line-leading +literal strings. The final closing marker may also have an arbitrary number +of spaces preceding it on its line. + +```ebnf +TemplateExpr = quotedTemplate | heredocTemplate; +quotedTemplate = (as defined in prose above); +heredocTemplate = ( + ("<<" | "<<-") Identifier Newline + (content as defined in prose above) + Identifier Newline +); +``` + +A quoted template expression containing only a single literal string serves +as a syntax for defining literal string _expressions_. In certain contexts +the template syntax is restricted in this manner: + +```ebnf +StringLit = '"' (quoted literals as defined in prose above) '"'; +``` + +The `StringLit` production permits the escape sequences discussed for quoted +template expressions as above, but does _not_ permit template interpolation +or directive sequences. + +### Variables and Variable Expressions + +A _variable_ is a value that has been assigned a symbolic name. Variables are +made available for use in expressions by the calling application, by populating +the _global scope_ used for expression evaluation. 
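Non-normatively, the following Go sketch shows how a calling application using the Go implementation of HCL vendored in this tree might populate the global scope and then evaluate an expression against it. The variable name `name`, the file name `example.hcl`, and the example expression are illustrative assumptions rather than part of this specification.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The calling application defines the global scope by populating an
	// EvalContext before asking an expression for its value.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"name": cty.StringVal("world"),
		},
	}

	expr, diags := hclsyntax.ParseExpression([]byte(`"hello ${name}"`), "example.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	fmt.Println(val.AsString()) // hello world
}
```

The same `hcl.EvalContext` value also carries the function table described under Functions and Function Calls below.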
+ +Variables can also be created by expressions themselves, which always creates +a _child scope_ that incorporates the variables from its parent scope but +(re-)defines zero or more names with new values. + +The value of a variable is accessed using a _variable expression_, which is +a standalone `Identifier` whose name corresponds to a defined variable: + +```ebnf +VariableExpr = Identifier; +``` + +Variables in a particular scope are immutable, but child scopes may _hide_ +a variable from an ancestor scope by defining a new variable of the same name. +When looking up variables, the most locally-defined variable of the given name +is used, and ancestor-scoped variables of the same name cannot be accessed. + +No direct syntax is provided for declaring or assigning variables, but other +expression constructs implicitly create child scopes and define variables as +part of their evaluation. + +### Functions and Function Calls + +A _function_ is an operation that has been assigned a symbolic name. Functions +are made available for use in expressions by the calling application, by +populating the _function table_ used for expression evaluation. + +The namespace of functions is distinct from the namespace of variables. A +function and a variable may share the same name with no implication that they +are in any way related. + +A function can be executed via a _function call_ expression: + +```ebnf +FunctionCall = Identifier "(" Arguments ")"; +Arguments = ( + () | + (Expression ("," Expression)* ("," | "...")?) +); +``` + +The definition of functions and the semantics of calling them are defined by +the language-agnostic HCL information model. The given arguments are mapped +onto the function's _parameters_ and the result of a function call expression +is the return value of the named function when given those arguments. + +If the final argument expression is followed by the ellipsis symbol (`...`), +the final argument expression must evaluate to either a list or tuple value. +The elements of the value are each mapped to a single parameter of the +named function, beginning at the first parameter remaining after all other +argument expressions have been mapped. + +Within the parentheses that delimit the function arguments, newline sequences +are ignored as whitespace. + +### For Expressions + +A _for expression_ is a construct for constructing a collection by projecting +the items from another collection. + +```ebnf +ForExpr = forTupleExpr | forObjectExpr; +forTupleExpr = "[" forIntro Expression forCond? "]"; +forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}"; +forIntro = "for" Identifier ("," Identifier)? "in" Expression ":"; +forCond = "if" Expression; +``` + +The punctuation used to delimit a for expression decides whether it will produce +a tuple value (`[` and `]`) or an object value (`{` and `}`). + +The "introduction" is equivalent in both cases: the keyword `for` followed by +either one or two identifiers separated by a comma which define the temporary +variable names used for iteration, followed by the keyword `in` and then +an expression that must evaluate to a value that can be iterated. The +introduction is then terminated by the colon (`:`) symbol. + +If only one identifier is provided, it is the name of a variable that will +be temporarily assigned the value of each element during iteration. If both +are provided, the first is the key and the second is the value. + +Tuple, object, list, map, and set types are iterable.
The type of collection +used defines how the key and value variables are populated: + +- For tuple and list types, the _key_ is the zero-based index into the + sequence for each element, and the _value_ is the element value. The + elements are visited in index order. +- For object and map types, the _key_ is the string attribute name or element + key, and the _value_ is the attribute or element value. The elements are + visited in the order defined by a lexicographic sort of the attribute names + or keys. +- For set types, the _key_ and _value_ are both the element value. The elements + are visited in an undefined but consistent order. + +The expression after the colon and (in the case of object `for`) the expression +after the `=>` are both evaluated once for each element of the source +collection, in a local scope that defines the key and value variable names +specified. + +The results of evaluating these expressions for each input element are used +to populate an element in the new collection. In the case of tuple `for`, the +single expression becomes an element, appending values to the tuple in visit +order. In the case of object `for`, the pair of expressions is used as an +attribute name and value respectively, creating an element in the resulting +object. + +In the case of object `for`, it is an error if two input elements produce +the same result from the attribute name expression, since duplicate +attributes are not possible. If the ellipsis symbol (`...`) appears +immediately after the value expression, this activates the grouping mode in +which each value in the resulting object is a _tuple_ of all of the values +that were produced against each distinct key. + +- `[for v in ["a", "b"]: v]` returns `["a", "b"]`. +- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`. +- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`. +- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute + `a` is defined twice. +- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`. + +If the `if` keyword is used after the element expression(s), it applies an +additional predicate that can be used to conditionally filter elements from +the source collection from consideration. The expression following `if` is +evaluated once for each source element, in the same scope used for the +element expression(s). It must evaluate to a boolean value; if `true`, the +element will be evaluated as normal, while if `false` the element will be +skipped. + +- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`. + +If the collection value, element expression(s) or condition expression return +unknown values that are otherwise type-valid, the result is a value of the +dynamic pseudo-type. + +### Index Operator + +The _index_ operator returns the value of a single element of a collection +value. It is a postfix operator and can be applied to any value that has +a tuple, object, map, or list type. + +```ebnf +Index = "[" Expression "]"; +``` + +The expression delimited by the brackets is the _key_ by which an element +will be looked up. + +If the index operator is applied to a value of tuple or list type, the +key expression must be a non-negative integer number representing the +zero-based element index to access. If applied to a value of object or map +type, the key expression must be a string representing the attribute name +or element key.
If the given key value is not of the appropriate type, a +conversion is attempted using the conversion rules from the HCL +syntax-agnostic information model. + +An error is produced if the given key expression does not correspond to +an element in the collection, either because it is of an unconvertible type, +because it is outside the range of elements for a tuple or list, or because +the given attribute or key does not exist. + +If either the collection or the key is an unknown value of an +otherwise-suitable type, the return value is an unknown value whose type +matches what type would be returned given known values, or a value of the +dynamic pseudo-type if type information alone cannot determine a suitable +return type. + +Within the brackets that delimit the index key, newline sequences are ignored +as whitespace. + +The HCL native syntax also includes a _legacy_ index operator that exists +only for compatibility with the precursor language HIL: + +```ebnf +LegacyIndex = '.' digit+ +``` + +This legacy index operator must be supported by parsers for compatibility, but +should not be used in new configurations. It allows an attribute-access-like +syntax for indexing, but it must still be interpreted as an index operation rather +than attribute access. + +The legacy syntax does not support chaining of index operations, like +`foo.0.0.bar`, because the interpretation of `0.0` as a number literal token +takes priority and thus renders the resulting sequence invalid. + +### Attribute Access Operator + +The _attribute access_ operator returns the value of a single attribute in +an object value. It is a postfix operator and can be applied to any value +that has an object type. + +```ebnf +GetAttr = "." Identifier; +``` + +The given identifier is interpreted as the name of the attribute to access. +An error is produced if the object to which the operator is applied does not +have an attribute with the given name. + +If the object is an unknown value of a type that has the attribute named, the +result is an unknown value of the attribute's type. + +### Splat Operators + +The _splat operators_ allow convenient access to attributes or elements of +elements in a tuple, list, or set value. + +There are two kinds of "splat" operator: + +- The _attribute-only_ splat operator supports only attribute lookups into + the elements from a list, but supports an arbitrary number of them. + +- The _full_ splat operator additionally supports indexing into the elements + from a list, and allows any combination of attribute access and index + operations. + +```ebnf +Splat = attrSplat | fullSplat; +attrSplat = "." "*" GetAttr*; +fullSplat = "[" "*" "]" (GetAttr | Index)*; +``` + +The splat operators can be thought of as shorthands for common operations that +could otherwise be performed using _for expressions_: + +- `tuple.*.foo.bar[0]` is approximately equivalent to + `[for v in tuple: v.foo.bar][0]`. +- `tuple[*].foo.bar[0]` is approximately equivalent to + `[for v in tuple: v.foo.bar[0]]`. + +Note the difference in how the trailing index operator is interpreted in +each case. This different interpretation is the key difference between the +_attribute-only_ and _full_ splat operators.
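As a non-normative illustration of the approximate equivalence described above, the following Go sketch (using the Go implementation vendored in this tree) evaluates a full splat expression and its _for expression_ counterpart against the same value; the variable name `tuple` and the file name `splat.hcl` are arbitrary choices for the example.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A tuple of two objects, as a calling application might supply it.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"tuple": cty.TupleVal([]cty.Value{
				cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("a")}),
				cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("b")}),
			}),
		},
	}

	// Both expressions should produce the same sequence of ids.
	for _, src := range []string{`tuple[*].id`, `[for v in tuple: v.id]`} {
		expr, diags := hclsyntax.ParseExpression([]byte(src), "splat.hcl", hcl.InitialPos)
		if diags.HasErrors() {
			panic(diags.Error())
		}
		val, diags := expr.Value(ctx)
		if diags.HasErrors() {
			panic(diags.Error())
		}
		fmt.Printf("%s => %#v\n", src, val)
	}
}
```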
+ +Splat operators have one additional behavior compared to the equivalent +_for expressions_ shown above: if a splat operator is applied to a value that +is _not_ of tuple, list, or set type, the value is coerced automatically into +a single-value list of the value type: + +- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object` + is a single object. +- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number` + is a single number. + +If applied to a null value that is not tuple, list, or set, the result is always +an empty tuple, which allows conveniently converting a possibly-null scalar +value into a tuple of zero or one elements. It is illegal to apply a splat +operator to a null value of tuple, list, or set type. + +### Operations + +Operations apply a particular operator to either one or two expression terms. + +```ebnf +Operation = unaryOp | binaryOp; +unaryOp = ("-" | "!") ExprTerm; +binaryOp = ExprTerm binaryOperator ExprTerm; +binaryOperator = compareOperator | arithmeticOperator | logicOperator; +compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">="; +arithmeticOperator = "+" | "-" | "*" | "/" | "%"; +logicOperator = "&&" | "||" | "!"; +``` + +The unary operators have the highest precedence. + +The binary operators are grouped into the following precedence levels: + +``` +Level Operators + 6 * / % + 5 + - + 4 > >= < <= + 3 == != + 2 && + 1 || +``` + +Higher values of "level" bind tighter. Operators within the same precedence +level have left-to-right associativity. For example, `x / y * z` is equivalent +to `(x / y) * z`. + +### Comparison Operators + +Comparison operators always produce boolean values, as a result of testing +the relationship between two values. + +The two equality operators apply to values of any type: + +``` +a == b equal +a != b not equal +``` + +Two values are equal if the are of identical types and their values are +equal as defined in the HCL syntax-agnostic information model. The equality +operators are commutative and opposite, such that `(a == b) == !(a != b)` +and `(a == b) == (b == a)` for all values `a` and `b`. + +The four numeric comparison operators apply only to numbers: + +``` +a < b less than +a <= b less than or equal to +a > b greater than +a >= b greater than or equal to +``` + +If either operand of a comparison operator is a correctly-typed unknown value +or a value of the dynamic pseudo-type, the result is an unknown boolean. + +### Arithmetic Operators + +Arithmetic operators apply only to number values and always produce number +values as results. + +``` +a + b sum (addition) +a - b difference (subtraction) +a * b product (multiplication) +a / b quotient (division) +a % b remainder (modulo) +-a negation +``` + +Arithmetic operations are considered to be performed in an arbitrary-precision +number space. + +If either operand of an arithmetic operator is an unknown number or a value +of the dynamic pseudo-type, the result is an unknown number. + +### Logic Operators + +Logic operators apply only to boolean values and always produce boolean values +as results. + +``` +a && b logical AND +a || b logical OR +!a logical NOT +``` + +If either operand of a logic operator is an unknown bool value or a value +of the dynamic pseudo-type, the result is an unknown bool value. + +### Conditional Operator + +The conditional operator allows selecting from one of two expressions based on +the outcome of a boolean expression. + +```ebnf +Conditional = Expression "?" 
Expression ":" Expression; +``` + +The first expression is the _predicate_, which is evaluated and must produce +a boolean result. If the predicate value is `true`, the result of the second +expression is the result of the conditional. If the predicate value is +`false`, the result of the third expression is the result of the conditional. + +The second and third expressions must be of the same type or must be able to +unify into a common type using the type unification rules defined in the +HCL syntax-agnostic information model. This unified type is the result type +of the conditional, with both expressions converted as necessary to the +unified type. + +If the predicate is an unknown boolean value or a value of the dynamic +pseudo-type then the result is an unknown value of the unified type of the +other two expressions. + +If either the second or third expressions produce errors when evaluated, +these errors are passed through only if the erroneous expression is selected. +This allows for expressions such as +`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length` +function) without producing an error when the predicate is `false`. + +## Templates + +The template sub-language is used within template expressions to concisely +combine strings and other values to produce other strings. It can also be +used in isolation as a standalone template language. + +```ebnf +Template = ( + TemplateLiteral | + TemplateInterpolation | + TemplateDirective +)* +TemplateDirective = TemplateIf | TemplateFor; +``` + +A template behaves like an expression that always returns a string value. +The different elements of the template are evaluated and combined into a +single string to return. If any of the elements produce an unknown string +or a value of the dynamic pseudo-type, the result is an unknown string. + +An important use-case for standalone templates is to enable the use of +expressions in alternative HCL syntaxes where a native expression grammar is +not available. For example, the HCL JSON profile treats the values of JSON +strings as standalone templates when attributes are evaluated in expression +mode. + +### Template Literals + +A template literal is a literal sequence of characters to include in the +resulting string. When the template sub-language is used standalone, a +template literal can contain any unicode character, with the exception +of the sequences that introduce interpolations and directives, and for the +sequences that escape those introductions. + +The interpolation and directive introductions are escaped by doubling their +leading characters. The `${` sequence is escaped as `$${` and the `%{` +sequence is escaped as `%%{`. + +When the template sub-language is embedded in the expression language via +_template expressions_, additional constraints and transforms are applied to +template literals as described in the definition of template expressions. + +The value of a template literal can be modified by _strip markers_ in any +interpolations or directives that are adjacent to it. A strip marker is +a tilde (`~`) placed immediately after the opening `{` or before the closing +`}` of a template sequence: + +- `hello ${~ "world" }` produces `"helloworld"`. +- `%{ if true ~} hello %{~ endif }` produces `"hello"`. + +When a strip marker is present, any spaces adjacent to it in the corresponding +string literal (if any) are removed before producing the final value. Space +characters are interpreted as per Unicode's definition. 
+ +Stripping is done at syntax level rather than value level. Values returned +by interpolations or directives are not subject to stripping: + +- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`, + because the space is not in a template literal directly adjacent to the + strip marker. + +### Template Interpolations + +An _interpolation sequence_ evaluates an expression (written in the +expression sub-language), converts the result to a string value, and +replaces itself with the resulting string. + +```ebnf +TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}"; +``` + +If the expression result cannot be converted to a string, an error is +produced. + +### Template If Directive + +The template `if` directive is the template equivalent of the +_conditional expression_, allowing selection of one of two sub-templates based +on the value of a predicate expression. + +```ebnf +TemplateIf = ( + ("%{" | "%{~") "if" Expression ("}" | "~}") + Template + ( + ("%{" | "%{~") "else" ("}" | "~}") + Template + )? + ("%{" | "%{~") "endif" ("}" | "~}") +); +``` + +The evaluation of the `if` directive is equivalent to the conditional +expression, with the following exceptions: + +- The two sub-templates always produce strings, and thus the result value is + also always a string. +- The `else` clause may be omitted, in which case the conditional's third + expression result is implied to be the empty string. + +### Template For Directive + +The template `for` directive is the template equivalent of the _for expression_, +producing zero or more copies of its sub-template based on the elements of +a collection. + +```ebnf +TemplateFor = ( + ("%{" | "%{~") "for" Identifier ("," Identifier) "in" Expression ("}" | "~}") + Template + ("%{" | "%{~") "endfor" ("}" | "~}") +); +``` + +The evaluation of the `for` directive is equivalent to the _for expression_ +when producing a tuple, with the following exceptions: + +- The sub-template always produces a string. +- There is no equivalent of the "if" clause on the for expression. +- The elements of the resulting tuple are all converted to strings and + concatenated to produce a flat string result. + +### Template Interpolation Unwrapping + +As a special case, a template that consists only of a single interpolation, +with no surrounding literals, directives or other interpolations, is +"unwrapped". In this case, the result of the interpolation expression is +returned verbatim, without conversion to string. + +This special case exists primarily to enable the native template language +to be used inside strings in alternative HCL syntaxes that lack a first-class +template or expression syntax. Unwrapping allows arbitrary expressions to be +used to populate attributes when strings in such languages are interpreted +as templates. + +- `${true}` produces the boolean value `true` +- `${"${true}"}` produces the boolean value `true`, because both the inner + and outer interpolations are subject to unwrapping. +- `hello ${true}` produces the string `"hello true"` +- `${""}${true}` produces the string `"true"` because there are two + interpolation sequences, even though one produces an empty result. +- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because + the presence of the `for` directive circumvents the unwrapping even though + the final result is a single value. + +In some contexts this unwrapping behavior may be circumvented by the calling +application, by converting the final template result to string. 
This is +necessary, for example, if a standalone template is being used to produce +the direct contents of a file, since the result in that case must always be a +string. + +## Static Analysis + +The HCL static analysis operations are implemented for some expression types +in the native syntax, as described in the following sections. + +A goal for static analysis of the native syntax is for the interpretation to +be as consistent as possible with the dynamic evaluation interpretation of +the given expression, though some deviations are intentionally made in order +to maximize the potential for analysis. + +### Static List + +The tuple construction syntax can be interpreted as a static list. All of +the expression elements given are returned as the static list elements, +with no further interpretation. + +### Static Map + +The object construction syntax can be interpreted as a static map. All of the +key/value pairs given are returned as the static pairs, with no further +interpretation. + +The usual requirement that an attribute name be interpretable as a string +does not apply to this static analysis, allowing callers to provide map-like +constructs with different key types by building on the map syntax. + +### Static Call + +The function call syntax can be interpreted as a static call. The called +function name is returned verbatim and the given argument expressions are +returned as the static arguments, with no further interpretation. + +### Static Traversal + +A variable expression and any attached attribute access operations and +constant index operations can be interpreted as a static traversal. + +The keywords `true`, `false` and `null` can also be interpreted as +static traversals, behaving as if they were references to variables of those +names, to allow callers to redefine the meaning of those keywords in certain +contexts. diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go new file mode 100644 index 000000000..2f7470c77 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go @@ -0,0 +1,394 @@ +package hclsyntax + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// AsHCLBlock returns the block data expressed as a *hcl.Block. +func (b *Block) AsHCLBlock() *hcl.Block { + if b == nil { + return nil + } + + lastHeaderRange := b.TypeRange + if len(b.LabelRanges) > 0 { + lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] + } + + return &hcl.Block{ + Type: b.Type, + Labels: b.Labels, + Body: b.Body, + + DefRange: hcl.RangeBetween(b.TypeRange, lastHeaderRange), + TypeRange: b.TypeRange, + LabelRanges: b.LabelRanges, + } +} + +// Body is the implementation of hcl.Body for the HCL native syntax. +type Body struct { + Attributes Attributes + Blocks Blocks + + // These are used with PartialContent to produce a "remaining items" + // body to return. They are nil on all bodies fresh out of the parser. 
+ hiddenAttrs map[string]struct{} + hiddenBlocks map[string]struct{} + + SrcRange hcl.Range + EndRange hcl.Range // Final token of the body, for reporting missing items +} + +// Assert that *Body implements hcl.Body +var assertBodyImplBody hcl.Body = &Body{} + +func (b *Body) walkChildNodes(w internalWalkFunc) { + w(b.Attributes) + w(b.Blocks) +} + +func (b *Body) Range() hcl.Range { + return b.SrcRange +} + +func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remainHCL, diags := b.PartialContent(schema) + + // No we'll see if anything actually remains, to produce errors about + // extraneous items. + remain := remainHCL.(*Body) + + for name, attr := range b.Attributes { + if _, hidden := remain.hiddenAttrs[name]; !hidden { + var suggestions []string + for _, attrS := range schema.Attributes { + if _, defined := content.Attributes[attrS.Name]; defined { + continue + } + suggestions = append(suggestions, attrS.Name) + } + suggestion := nameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + // Is there a block of the same name? + for _, blockS := range schema.Blocks { + if blockS.Type == name { + suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported argument", + Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion), + Subject: &attr.NameRange, + }) + } + } + + for _, block := range b.Blocks { + blockTy := block.Type + if _, hidden := remain.hiddenBlocks[blockTy]; !hidden { + var suggestions []string + for _, blockS := range schema.Blocks { + suggestions = append(suggestions, blockS.Type) + } + suggestion := nameSuggestion(blockTy, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + // Is there an attribute of the same name? + for _, attrS := range schema.Attributes { + if attrS.Name == blockTy { + suggestion = fmt.Sprintf(" Did you mean to define argument %q? 
If so, use the equals sign to assign it a value.", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < len(blockS.LabelNames) { + name := block.Type + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name), + Detail: fmt.Sprintf( + "All %s blocks must have %d labels (%s).", + name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), + ), + Subject: &block.OpenBraceRange, + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + continue + } + + blocks = append(blocks, block.AsHCLBlock()) + } + + // We hide blocks only after we've processed all of them, since otherwise + // we can't process more than one of the same type. 
+ for _, blockS := range schema.Blocks { + hiddenBlocks[blockS.Type] = struct{}{} + } + + remain := &Body{ + Attributes: b.Attributes, + Blocks: b.Blocks, + + hiddenAttrs: hiddenAttrs, + hiddenBlocks: hiddenBlocks, + + SrcRange: b.SrcRange, + EndRange: b.EndRange, + } + + return &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + + MissingItemRange: b.MissingItemRange(), + }, remain, diags +} + +func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if len(b.Blocks) > 0 { + example := b.Blocks[0] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %q block", example.Type), + Detail: "Blocks are not allowed here.", + Subject: &example.TypeRange, + }) + // we will continue processing anyway, and return the attributes + // we are able to find so that certain analyses can still be done + // in the face of errors. + } + + if b.Attributes == nil { + return attrs, diags + } + + for name, attr := range b.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + return attrs, diags +} + +func (b *Body) MissingItemRange() hcl.Range { + return hcl.Range{ + Filename: b.SrcRange.Filename, + Start: b.SrcRange.Start, + End: b.SrcRange.Start, + } +} + +// Attributes is the collection of attribute definitions within a body. +type Attributes map[string]*Attribute + +func (a Attributes) walkChildNodes(w internalWalkFunc) { + for _, attr := range a { + w(attr) + } +} + +// Range returns the range of some arbitrary point within the set of +// attributes, or an invalid range if there are no attributes. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (a Attributes) Range() hcl.Range { + // An attributes doesn't really have a useful range to report, since + // it's just a grouping construct. So we'll arbitrarily take the + // range of one of the attributes, or produce an invalid range if we have + // none. In practice, there's little reason to ask for the range of + // an Attributes. + for _, attr := range a { + return attr.Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Attribute represents a single attribute definition within a body. +type Attribute struct { + Name string + Expr Expression + + SrcRange hcl.Range + NameRange hcl.Range + EqualsRange hcl.Range +} + +func (a *Attribute) walkChildNodes(w internalWalkFunc) { + w(a.Expr) +} + +func (a *Attribute) Range() hcl.Range { + return a.SrcRange +} + +// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. +func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + if a == nil { + return nil + } + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for _, block := range bs { + w(block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. 
+func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + w(b.Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} + +func (b *Block) DefRange() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go new file mode 100644 index 000000000..587844ac2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go @@ -0,0 +1,118 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl/v2" +) + +// ----------------------------------------------------------------------------- +// The methods in this file are all optional extension methods that serve to +// implement the methods of the same name on *hcl.File when its root body +// is provided by this package. +// ----------------------------------------------------------------------------- + +// BlocksAtPos implements the method of the same name for an *hcl.File that +// is backed by a *Body. +func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block { + list, _ := b.blocksAtPos(pos, true) + return list +} + +// InnermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block { + _, innermost := b.blocksAtPos(pos, false) + return innermost.AsHCLBlock() +} + +// OutermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block { + return b.outermostBlockAtPos(pos).AsHCLBlock() +} + +// blocksAtPos is the internal engine of both BlocksAtPos and +// InnermostBlockAtPos, which both need to do the same logic but return a +// differently-shaped result. +// +// list is nil if makeList is false, avoiding an allocation. Innermost is +// always set, and if the returned list is non-nil it will always match the +// final element from that list. +func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) { + current := b + +Blocks: + for current != nil { + for _, block := range current.Blocks { + wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange) + if wholeRange.ContainsPos(pos) { + innermost = block + if makeList { + list = append(list, innermost.AsHCLBlock()) + } + current = block.Body + continue Blocks + } + } + + // If we fall out here then none of the current body's nested blocks + // contain the position we are looking for, and so we're done. + break + } + + return +} + +// outermostBlockAtPos is the internal version of OutermostBlockAtPos that +// returns a hclsyntax.Block rather than an hcl.Block, allowing for further +// analysis if necessary. +func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block { + // This is similar to blocksAtPos, but simpler because we know it only + // ever needs to search the first level of nested blocks. 
+ + for _, block := range b.Blocks { + wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange) + if wholeRange.ContainsPos(pos) { + return block + } + } + + return nil +} + +// AttributeAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute { + return b.attributeAtPos(pos).AsHCLAttribute() +} + +// attributeAtPos is the internal version of AttributeAtPos that returns a +// hclsyntax.Block rather than an hcl.Block, allowing for further analysis if +// necessary. +func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute { + searchBody := b + _, block := b.blocksAtPos(pos, false) + if block != nil { + searchBody = block.Body + } + + for _, attr := range searchBody.Attributes { + if attr.SrcRange.ContainsPos(pos) { + return attr + } + } + + return nil +} + +// OutermostExprAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression { + attr := b.attributeAtPos(pos) + if attr == nil { + return nil + } + if !attr.Expr.Range().ContainsPos(pos) { + return nil + } + return attr.Expr +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go new file mode 100644 index 000000000..c7ffe2073 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go @@ -0,0 +1,320 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl/v2" +) + +// Token represents a sequence of bytes from some HCL code that has been +// tagged with a type and its range within the source file. +type Token struct { + Type TokenType + Bytes []byte + Range hcl.Range +} + +// Tokens is a slice of Token. +type Tokens []Token + +// TokenType is an enumeration used for the Type field on Token. +type TokenType rune + +const ( + // Single-character tokens are represented by their own character, for + // convenience in producing these within the scanner. However, the values + // are otherwise arbitrary and just intended to be mnemonic for humans + // who might see them in debug output. + + TokenOBrace TokenType = '{' + TokenCBrace TokenType = '}' + TokenOBrack TokenType = '[' + TokenCBrack TokenType = ']' + TokenOParen TokenType = '(' + TokenCParen TokenType = ')' + TokenOQuote TokenType = '«' + TokenCQuote TokenType = '»' + TokenOHeredoc TokenType = 'H' + TokenCHeredoc TokenType = 'h' + + TokenStar TokenType = '*' + TokenSlash TokenType = '/' + TokenPlus TokenType = '+' + TokenMinus TokenType = '-' + TokenPercent TokenType = '%' + + TokenEqual TokenType = '=' + TokenEqualOp TokenType = '≔' + TokenNotEqual TokenType = '≠' + TokenLessThan TokenType = '<' + TokenLessThanEq TokenType = '≤' + TokenGreaterThan TokenType = '>' + TokenGreaterThanEq TokenType = '≥' + + TokenAnd TokenType = '∧' + TokenOr TokenType = '∨' + TokenBang TokenType = '!' + + TokenDot TokenType = '.' + TokenComma TokenType = ',' + + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' + + TokenQuestion TokenType = '?' 
+ TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. + + TokenBitwiseAnd TokenType = '&' + TokenBitwiseOr TokenType = '|' + TokenBitwiseNot TokenType = '~' + TokenBitwiseXor TokenType = '^' + TokenStarStar TokenType = '➚' + TokenApostrophe TokenType = '\'' + TokenBacktick TokenType = '`' + TokenSemicolon TokenType = ';' + TokenTabs TokenType = '␉' + TokenInvalid TokenType = '�' + TokenBadUTF8 TokenType = '💩' + TokenQuotedNewline TokenType = '␤' + + // TokenNil is a placeholder for when a token is required but none is + // available, e.g. when reporting errors. The scanner will never produce + // this as part of a token stream. + TokenNil TokenType = '\x00' +) + +func (t TokenType) GoString() string { + return fmt.Sprintf("hclsyntax.%s", t.String()) +} + +type scanMode int + +const ( + scanNormal scanMode = iota + scanTemplate + scanIdentOnly +) + +type tokenAccum struct { + Filename string + Bytes []byte + Pos hcl.Pos + Tokens []Token + StartByte int +} + +func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { + // Walk through our buffer to figure out how much we need to adjust + // the start pos to get our end pos. + + start := f.Pos + start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset + start.Byte = startOfs + f.StartByte + + end := start + end.Byte = endOfs + f.StartByte + b := f.Bytes[startOfs:endOfs] + for len(b) > 0 { + advance, seq, _ := textseg.ScanGraphemeClusters(b, true) + if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') { + end.Line++ + end.Column = 1 + } else { + end.Column++ + } + b = b[advance:] + } + + f.Pos = end + + f.Tokens = append(f.Tokens, Token{ + Type: ty, + Bytes: f.Bytes[startOfs:endOfs], + Range: hcl.Range{ + Filename: f.Filename, + Start: start, + End: end, + }, + }) +} + +type heredocInProgress struct { + Marker []byte + StartOfLine bool +} + +func tokenOpensFlushHeredoc(tok Token) bool { + if tok.Type != TokenOHeredoc { + return false + } + return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'}) +} + +// checkInvalidTokens does a simple pass across the given tokens and generates +// diagnostics for tokens that should _never_ appear in HCL source. This +// is intended to avoid the need for the parser to have special support +// for them all over. +// +// Returns a diagnostics with no errors if everything seems acceptable. +// Otherwise, returns zero or more error diagnostics, though tries to limit +// repetition of the same information. 
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics { + var diags hcl.Diagnostics + + toldBitwise := 0 + toldExponent := 0 + toldBacktick := 0 + toldApostrophe := 0 + toldSemicolon := 0 + toldTabs := 0 + toldBadUTF8 := 0 + + for _, tok := range tokens { + // copy token so it's safe to point to it + tok := tok + + switch tok.Type { + case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: + if toldBitwise < 4 { + var suggestion string + switch tok.Type { + case TokenBitwiseAnd: + suggestion = " Did you mean boolean AND (\"&&\")?" + case TokenBitwiseOr: + suggestion = " Did you mean boolean OR (\"&&\")?" + case TokenBitwiseNot: + suggestion = " Did you mean boolean NOT (\"!\")?" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported operator", + Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion), + Subject: &tok.Range, + }) + toldBitwise++ + } + case TokenStarStar: + if toldExponent < 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported operator", + Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.", + Subject: &tok.Range, + }) + + toldExponent++ + } + case TokenBacktick: + // Only report for alternating (even) backticks, so we won't report both start and ends of the same + // backtick-quoted string. + if (toldBacktick % 2) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"< +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. +# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. 
+ +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! 
+end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. + +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts < 0: + line.lead[0].SpacesBefore = 2 * len(indents) + indents = append(indents, netBrackets) + case netBrackets < 0: + closed := -netBrackets + for closed > 0 && len(indents) > 0 { + switch { + + case closed > indents[len(indents)-1]: + closed -= indents[len(indents)-1] + indents = indents[:len(indents)-1] + + case closed < indents[len(indents)-1]: + indents[len(indents)-1] -= closed + closed = 0 + + default: + indents = indents[:len(indents)-1] + closed = 0 + } + } + line.lead[0].SpacesBefore = 2 * len(indents) + default: + line.lead[0].SpacesBefore = 2 * len(indents) + } + } +} + +func formatSpaces(lines []formatLine) { + for _, line := range lines { + for i, token := range line.lead { + var before, after *Token + if i > 0 { + before = line.lead[i-1] + } else { + before = nilToken + } + if i < (len(line.lead) - 1) { + after = line.lead[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + for i, token := range line.assign { + if i == 0 { + // first token in "assign" always has one space before to + // separate the equals sign from what it's assigning. + token.SpacesBefore = 1 + } + + var before, after *Token + if i > 0 { + before = line.assign[i-1] + } else { + before = nilToken + } + if i < (len(line.assign) - 1) { + after = line.assign[i+1] + } else { + after = nilToken + } + if spaceAfterToken(token, before, after) { + after.SpacesBefore = 1 + } else { + after.SpacesBefore = 0 + } + } + + } +} + +func formatCells(lines []formatLine) { + + chainStart := -1 + maxColumns := 0 + + // We'll deal with the "assign" cell first, since moving that will + // also impact the "comment" cell. 
+ closeAssignChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.assign[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.assign == nil { + if chainStart != -1 { + closeAssignChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeAssignChain(len(lines)) + } + + // Now we'll deal with the comments + closeCommentChain := func(i int) { + for _, chainLine := range lines[chainStart:i] { + columns := chainLine.lead.Columns() + chainLine.assign.Columns() + spaces := (maxColumns - columns) + 1 + chainLine.comment[0].SpacesBefore = spaces + } + chainStart = -1 + maxColumns = 0 + } + for i, line := range lines { + if line.comment == nil { + if chainStart != -1 { + closeCommentChain(i) + } + } else { + if chainStart == -1 { + chainStart = i + } + columns := line.lead.Columns() + line.assign.Columns() + if columns > maxColumns { + maxColumns = columns + } + } + } + if chainStart != -1 { + closeCommentChain(len(lines)) + } + +} + +// spaceAfterToken decides whether a particular subject token should have a +// space after it when surrounded by the given before and after tokens. +// "before" can be TokenNil, if the subject token is at the start of a sequence. +func spaceAfterToken(subject, before, after *Token) bool { + switch { + + case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil: + // Never add spaces before a newline + return false + + case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen: + // Don't split a function name from open paren in a call + return false + + case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot: + // Don't use spaces around attribute access dots + return false + + case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis: + // No space right before a comma or ... in an argument list + return false + + case subject.Type == hclsyntax.TokenComma: + // Always a space after a comma + return true + + case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc: + // No extra spaces within templates + return false + + case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent: + // This is a special case for inside for expressions where a user + // might want to use a literal tuple constructor: + // [for x in [foo]: x] + // ... in that case, we would normally produce in[foo] thinking that + // in is a reference, but we'll recognize it as a keyword here instead + // to make the result less confusing. + return true + + case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0): + return false + + case subject.Type == hclsyntax.TokenMinus: + // Since a minus can either be subtraction or negation, and the latter + // should _not_ have a space after it, we need to use some heuristics + // to decide which case this is. 
+ // We guess that we have a negation if the token before doesn't look + // like it could be the end of an expression. + + switch before.Type { + + case hclsyntax.TokenNil: + // Minus at the start of input must be a negation + return false + + case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion: + // Minus immediately after an opening bracket or separator must be a negation. + return false + + case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus: + // Minus immediately after another arithmetic operator must be negation. + return false + + case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq: + // Minus immediately after another comparison operator must be negation. + return false + + case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang: + // Minus immediately after logical operator doesn't make sense but probably intended as negation. + return false + + default: + return true + } + + case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace: + // Unlike other bracket types, braces have spaces on both sides of them, + // both in single-line nested blocks foo { bar = baz } and in object + // constructor expressions foo = { bar = baz }. + if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace { + // An open brace followed by a close brace is an exception, however. + // e.g. foo {} rather than foo { } + return false + } + return true + + // In the unlikely event that an interpolation expression is just + // a single object constructor, we'll put a space between the ${ and + // the following { to make this more obvious, and then the same + // thing for the two braces at the end. + case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace: + return true + case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd: + return true + + // Don't add spaces between interpolated items + case subject.Type == hclsyntax.TokenTemplateSeqEnd && (after.Type == hclsyntax.TokenTemplateInterp || after.Type == hclsyntax.TokenTemplateControl): + return false + + case tokenBracketChange(subject) > 0: + // No spaces after open brackets + return false + + case tokenBracketChange(after) < 0: + // No spaces before close brackets + return false + + default: + // Most tokens are space-separated + return true + + } +} + +func linesForFormat(tokens Tokens) []formatLine { + if len(tokens) == 0 { + return make([]formatLine, 0) + } + + // first we'll count our lines, so we can allocate the array for them in + // a single block. (We want to minimize memory pressure in this codepath, + // so it can be run somewhat-frequently by editor integrations.) + lineCount := 1 // if there are zero newlines then there is one line + for _, tok := range tokens { + if tokenIsNewline(tok) { + lineCount++ + } + } + + // To start, we'll just put everything in the "lead" cell on each line, + // and then do another pass over the lines afterwards to adjust. + lines := make([]formatLine, lineCount) + li := 0 + lineStart := 0 + for i, tok := range tokens { + if tok.Type == hclsyntax.TokenEOF { + // The EOF token doesn't belong to any line, and terminates the + // token sequence. 
+ lines[li].lead = tokens[lineStart:i] + break + } + + if tokenIsNewline(tok) { + lines[li].lead = tokens[lineStart : i+1] + lineStart = i + 1 + li++ + } + } + + // If a set of tokens doesn't end in TokenEOF (e.g. because it's a + // fragment of tokens from the middle of a file) then we might fall + // out here with a line still pending. + if lineStart < len(tokens) { + lines[li].lead = tokens[lineStart:] + if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF { + lines[li].lead = lines[li].lead[:len(lines[li].lead)-1] + } + } + + // Now we'll pick off any trailing comments and attribute assignments + // to shuffle off into the "comment" and "assign" cells. + for i := range lines { + line := &lines[i] + + if len(line.lead) == 0 { + // if the line is empty then there's nothing for us to do + // (this should happen only for the final line, because all other + // lines would have a newline token of some kind) + continue + } + + if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment { + line.comment = line.lead[len(line.lead)-1:] + line.lead = line.lead[:len(line.lead)-1] + } + + for i, tok := range line.lead { + if i > 0 && tok.Type == hclsyntax.TokenEqual { + // We only move the tokens into "assign" if the RHS seems to + // be a whole expression, which we determine by counting + // brackets. If there's a net positive number of brackets + // then that suggests we're introducing a multi-line expression. + netBrackets := 0 + for _, token := range line.lead[i:] { + netBrackets += tokenBracketChange(token) + } + + if netBrackets == 0 { + line.assign = line.lead[i:] + line.lead = line.lead[:i] + } + break + } + } + } + + return lines +} + +func tokenIsNewline(tok *Token) bool { + if tok.Type == hclsyntax.TokenNewline { + return true + } else if tok.Type == hclsyntax.TokenComment { + // Single line tokens (# and //) consume their terminating newline, + // so we need to treat them as newline tokens as well. + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + return true + } + } + return false +} + +func tokenBracketChange(tok *Token) int { + switch tok.Type { + case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp: + return 1 + case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd: + return -1 + default: + return 0 + } +} + +// formatLine represents a single line of source code for formatting purposes, +// splitting its tokens into up to three "cells": +// +// lead: always present, representing everything up to one of the others +// assign: if line contains an attribute assignment, represents the tokens +// starting at (and including) the equals symbol +// comment: if line contains any non-comment tokens and ends with a +// single-line comment token, represents the comment. +// +// When formatting, the leading spaces of the first tokens in each of these +// cells is adjusted to align vertically their occurences on consecutive +// rows. 
+type formatLine struct { + lead Tokens + assign Tokens + comment Tokens +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go new file mode 100644 index 000000000..289a30d68 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/generate.go @@ -0,0 +1,250 @@ +package hclwrite + +import ( + "fmt" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// TokensForValue returns a sequence of tokens that represents the given +// constant value. +// +// This function only supports types that are used by HCL. In particular, it +// does not support capsule types and will panic if given one. +// +// It is not possible to express an unknown value in source code, so this +// function will panic if the given value is unknown or contains any unknown +// values. A caller can call the value's IsWhollyKnown method to verify that +// no unknown values are present before calling TokensForValue. +func TokensForValue(val cty.Value) Tokens { + toks := appendTokensForValue(val, nil) + format(toks) // fiddle with the SpacesBefore field to get canonical spacing + return toks +} + +// TokensForTraversal returns a sequence of tokens that represents the given +// traversal. +// +// If the traversal is absolute then the result is a self-contained, valid +// reference expression. If the traversal is relative then the returned tokens +// could be appended to some other expression tokens to traverse into the +// represented expression. +func TokensForTraversal(traversal hcl.Traversal) Tokens { + toks := appendTokensForTraversal(traversal, nil) + format(toks) // fiddle with the SpacesBefore field to get canonical spacing + return toks +} + +func appendTokensForValue(val cty.Value, toks Tokens) Tokens { + switch { + + case !val.IsKnown(): + panic("cannot produce tokens for unknown value") + + case val.IsNull(): + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(`null`), + }) + + case val.Type() == cty.Bool: + var src []byte + if val.True() { + src = []byte(`true`) + } else { + src = []byte(`false`) + } + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: src, + }) + + case val.Type() == cty.Number: + bf := val.AsBigFloat() + srcStr := bf.Text('f', -1) + toks = append(toks, &Token{ + Type: hclsyntax.TokenNumberLit, + Bytes: []byte(srcStr), + }) + + case val.Type() == cty.String: + // TODO: If it's a multi-line string ending in a newline, format + // it as a HEREDOC instead. 
+ src := escapeQuotedStringLit(val.AsString()) + toks = append(toks, &Token{ + Type: hclsyntax.TokenOQuote, + Bytes: []byte{'"'}, + }) + if len(src) > 0 { + toks = append(toks, &Token{ + Type: hclsyntax.TokenQuotedLit, + Bytes: src, + }) + } + toks = append(toks, &Token{ + Type: hclsyntax.TokenCQuote, + Bytes: []byte{'"'}, + }) + + case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType(): + toks = append(toks, &Token{ + Type: hclsyntax.TokenOBrack, + Bytes: []byte{'['}, + }) + + i := 0 + for it := val.ElementIterator(); it.Next(); { + if i > 0 { + toks = append(toks, &Token{ + Type: hclsyntax.TokenComma, + Bytes: []byte{','}, + }) + } + _, eVal := it.Element() + toks = appendTokensForValue(eVal, toks) + i++ + } + + toks = append(toks, &Token{ + Type: hclsyntax.TokenCBrack, + Bytes: []byte{']'}, + }) + + case val.Type().IsMapType() || val.Type().IsObjectType(): + toks = append(toks, &Token{ + Type: hclsyntax.TokenOBrace, + Bytes: []byte{'{'}, + }) + + i := 0 + for it := val.ElementIterator(); it.Next(); { + if i > 0 { + toks = append(toks, &Token{ + Type: hclsyntax.TokenComma, + Bytes: []byte{','}, + }) + } + eKey, eVal := it.Element() + if hclsyntax.ValidIdentifier(eKey.AsString()) { + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(eKey.AsString()), + }) + } else { + toks = appendTokensForValue(eKey, toks) + } + toks = append(toks, &Token{ + Type: hclsyntax.TokenEqual, + Bytes: []byte{'='}, + }) + toks = appendTokensForValue(eVal, toks) + i++ + } + + toks = append(toks, &Token{ + Type: hclsyntax.TokenCBrace, + Bytes: []byte{'}'}, + }) + + default: + panic(fmt.Sprintf("cannot produce tokens for %#v", val)) + } + + return toks +} + +func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens { + for _, step := range traversal { + appendTokensForTraversalStep(step, toks) + } + return toks +} + +func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) { + switch ts := step.(type) { + case hcl.TraverseRoot: + toks = append(toks, &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(ts.Name), + }) + case hcl.TraverseAttr: + toks = append( + toks, + &Token{ + Type: hclsyntax.TokenDot, + Bytes: []byte{'.'}, + }, + &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(ts.Name), + }, + ) + case hcl.TraverseIndex: + toks = append(toks, &Token{ + Type: hclsyntax.TokenOBrack, + Bytes: []byte{'['}, + }) + appendTokensForValue(ts.Key, toks) + toks = append(toks, &Token{ + Type: hclsyntax.TokenCBrack, + Bytes: []byte{']'}, + }) + default: + panic(fmt.Sprintf("unsupported traversal step type %T", step)) + } +} + +func escapeQuotedStringLit(s string) []byte { + if len(s) == 0 { + return nil + } + buf := make([]byte, 0, len(s)) + for i, r := range s { + switch r { + case '\n': + buf = append(buf, '\\', 'n') + case '\r': + buf = append(buf, '\\', 'r') + case '\t': + buf = append(buf, '\\', 't') + case '"': + buf = append(buf, '\\', '"') + case '\\': + buf = append(buf, '\\', '\\') + case '$', '%': + buf = appendRune(buf, r) + remain := s[i+1:] + if len(remain) > 0 && remain[0] == '{' { + // Double up our template introducer symbol to escape it. + buf = appendRune(buf, r) + } + default: + if !unicode.IsPrint(r) { + var fmted string + if r < 65536 { + fmted = fmt.Sprintf("\\u%04x", r) + } else { + fmted = fmt.Sprintf("\\U%08x", r) + } + buf = append(buf, fmted...) 
+ } else { + buf = appendRune(buf, r) + } + } + } + return buf +} + +func appendRune(b []byte, r rune) []byte { + l := utf8.RuneLen(r) + for i := 0; i < l; i++ { + b = append(b, 0) // make room at the end of our buffer + } + ch := b[len(b)-l:] + utf8.EncodeRune(ch, r) + return b +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go new file mode 100644 index 000000000..cedf68627 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/native_node_sorter.go @@ -0,0 +1,23 @@ +package hclwrite + +import ( + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +type nativeNodeSorter struct { + Nodes []hclsyntax.Node +} + +func (s nativeNodeSorter) Len() int { + return len(s.Nodes) +} + +func (s nativeNodeSorter) Less(i, j int) bool { + rangeI := s.Nodes[i].Range() + rangeJ := s.Nodes[j].Range() + return rangeI.Start.Byte < rangeJ.Start.Byte +} + +func (s nativeNodeSorter) Swap(i, j int) { + s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i] +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go new file mode 100644 index 000000000..45669f7f9 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/node.go @@ -0,0 +1,260 @@ +package hclwrite + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" +) + +// node represents a node in the AST. +type node struct { + content nodeContent + + list *nodes + before, after *node +} + +func newNode(c nodeContent) *node { + return &node{ + content: c, + } +} + +func (n *node) Equal(other *node) bool { + return cmp.Equal(n.content, other.content) +} + +func (n *node) BuildTokens(to Tokens) Tokens { + return n.content.BuildTokens(to) +} + +// Detach removes the receiver from the list it currently belongs to. If the +// node is not currently in a list, this is a no-op. +func (n *node) Detach() { + if n.list == nil { + return + } + if n.before != nil { + n.before.after = n.after + } + if n.after != nil { + n.after.before = n.before + } + if n.list.first == n { + n.list.first = n.after + } + if n.list.last == n { + n.list.last = n.before + } + n.list = nil + n.before = nil + n.after = nil +} + +// ReplaceWith removes the receiver from the list it currently belongs to and +// inserts a new node with the given content in its place. If the node is not +// currently in a list, this function will panic. +// +// The return value is the newly-constructed node, containing the given content. +// After this function returns, the reciever is no longer attached to a list. +func (n *node) ReplaceWith(c nodeContent) *node { + if n.list == nil { + panic("can't replace node that is not in a list") + } + + before := n.before + after := n.after + list := n.list + n.before, n.after, n.list = nil, nil, nil + + nn := newNode(c) + nn.before = before + nn.after = after + nn.list = list + if before != nil { + before.after = nn + } + if after != nil { + after.before = nn + } + return nn +} + +func (n *node) assertUnattached() { + if n.list != nil { + panic(fmt.Sprintf("attempt to attach already-attached node %#v", n)) + } +} + +// nodeContent is the interface type implemented by all AST content types. +type nodeContent interface { + walkChildNodes(w internalWalkFunc) + BuildTokens(to Tokens) Tokens +} + +// nodes is a list of nodes. 
+type nodes struct { + first, last *node +} + +func (ns *nodes) BuildTokens(to Tokens) Tokens { + for n := ns.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +func (ns *nodes) Clear() { + ns.first = nil + ns.last = nil +} + +func (ns *nodes) Append(c nodeContent) *node { + n := &node{ + content: c, + } + ns.AppendNode(n) + n.list = ns + return n +} + +func (ns *nodes) AppendNode(n *node) { + if ns.last != nil { + n.before = ns.last + ns.last.after = n + } + n.list = ns + ns.last = n + if ns.first == nil { + ns.first = n + } +} + +func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node { + if len(tokens) == 0 { + return nil + } + n := newNode(tokens) + ns.AppendNode(n) + n.list = ns + return n +} + +// FindNodeWithContent searches the nodes for a node whose content equals +// the given content. If it finds one then it returns it. Otherwise it returns +// nil. +func (ns *nodes) FindNodeWithContent(content nodeContent) *node { + for n := ns.first; n != nil; n = n.after { + if n.content == content { + return n + } + } + return nil +} + +// nodeSet is an unordered set of nodes. It is used to describe a set of nodes +// that all belong to the same list that have some role or characteristic +// in common. +type nodeSet map[*node]struct{} + +func newNodeSet() nodeSet { + return make(nodeSet) +} + +func (ns nodeSet) Has(n *node) bool { + if ns == nil { + return false + } + _, exists := ns[n] + return exists +} + +func (ns nodeSet) Add(n *node) { + ns[n] = struct{}{} +} + +func (ns nodeSet) Remove(n *node) { + delete(ns, n) +} + +func (ns nodeSet) List() []*node { + if len(ns) == 0 { + return nil + } + + ret := make([]*node, 0, len(ns)) + + // Determine which list we are working with. We assume here that all of + // the nodes belong to the same list, since that is part of the contract + // for nodeSet. + var list *nodes + for n := range ns { + list = n.list + break + } + + // We recover the order by iterating over the whole list. This is not + // the most efficient way to do it, but our node lists should always be + // small so not worth making things more complex. + for n := list.first; n != nil; n = n.after { + if ns.Has(n) { + ret = append(ret, n) + } + } + return ret +} + +// FindNodeWithContent searches the nodes for a node whose content equals +// the given content. If it finds one then it returns it. Otherwise it returns +// nil. +func (ns nodeSet) FindNodeWithContent(content nodeContent) *node { + for n := range ns { + if n.content == content { + return n + } + } + return nil +} + +type internalWalkFunc func(*node) + +// inTree can be embedded into a content struct that has child nodes to get +// a standard implementation of the NodeContent interface and a record of +// a potential parent node. 
+type inTree struct { + parent *node + children *nodes +} + +func newInTree() inTree { + return inTree{ + children: &nodes{}, + } +} + +func (it *inTree) assertUnattached() { + if it.parent != nil { + panic(fmt.Sprintf("node is already attached to %T", it.parent.content)) + } +} + +func (it *inTree) walkChildNodes(w internalWalkFunc) { + for n := it.children.first; n != nil; n = n.after { + w(n) + } +} + +func (it *inTree) BuildTokens(to Tokens) Tokens { + for n := it.children.first; n != nil; n = n.after { + to = n.BuildTokens(to) + } + return to +} + +// leafNode can be embedded into a content struct to give it a do-nothing +// implementation of walkChildNodes +type leafNode struct { +} + +func (n *leafNode) walkChildNodes(w internalWalkFunc) { +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go new file mode 100644 index 000000000..d6cf532ea --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/parser.go @@ -0,0 +1,599 @@ +package hclwrite + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// Our "parser" here is actually not doing any parsing of its own. Instead, +// it leans on the native parser in hclsyntax, and then uses the source ranges +// from the AST to partition the raw token sequence to match the raw tokens +// up to AST nodes. +// +// This strategy feels somewhat counter-intuitive, since most of the work the +// parser does is thrown away here, but this strategy is chosen because the +// normal parsing work done by hclsyntax is considered to be the "main case", +// while modifying and re-printing source is more of an edge case, used only +// in ancillary tools, and so it's good to keep all the main parsing logic +// with the main case but keep all of the extra complexity of token wrangling +// out of the main parser, which is already rather complex just serving the +// use-cases it already serves. +// +// If the parsing step produces any errors, the returned File is nil because +// we can't reliably extract tokens from the partial AST produced by an +// erroneous parse. +func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { + file, diags := hclsyntax.ParseConfig(src, filename, start) + if diags.HasErrors() { + return nil, diags + } + + // To do our work here, we use the "native" tokens (those from hclsyntax) + // to match against source ranges in the AST, but ultimately produce + // slices from our sequence of "writer" tokens, which contain only + // *relative* position information that is more appropriate for + // transformation/writing use-cases. + nativeTokens, diags := hclsyntax.LexConfig(src, filename, start) + if diags.HasErrors() { + // should never happen, since we would've caught these diags in + // the first call above. 
+ return nil, diags + } + writerTokens := writerTokens(nativeTokens) + + from := inputTokens{ + nativeTokens: nativeTokens, + writerTokens: writerTokens, + } + + before, root, after := parseBody(file.Body.(*hclsyntax.Body), from) + ret := &File{ + inTree: newInTree(), + + srcBytes: src, + body: root, + } + + nodes := ret.inTree.children + nodes.Append(before.Tokens()) + nodes.AppendNode(root) + nodes.Append(after.Tokens()) + + return ret, diags +} + +type inputTokens struct { + nativeTokens hclsyntax.Tokens + writerTokens Tokens +} + +func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) { + start, end := partitionTokens(it.nativeTokens, rng) + before = it.Slice(0, start) + within = it.Slice(start, end) + after = it.Slice(end, len(it.nativeTokens)) + return +} + +func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) { + for i, t := range it.writerTokens { + if t.Type == ty { + return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens)) + } + } + panic(fmt.Sprintf("didn't find any token of type %s", ty)) +} + +func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) { + before, within, after := it.PartitionType(ty) + if within.Len() != 1 { + panic("PartitionType found more than one token") + } + return before, within.Tokens()[0], after +} + +// PartitionIncludeComments is like Partition except the returned "within" +// range includes any lead and line comments associated with the range. +func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) { + start, end := partitionTokens(it.nativeTokens, rng) + start = partitionLeadCommentTokens(it.nativeTokens[:start]) + _, afterNewline := partitionLineEndTokens(it.nativeTokens[end:]) + end += afterNewline + + before = it.Slice(0, start) + within = it.Slice(start, end) + after = it.Slice(end, len(it.nativeTokens)) + return + +} + +// PartitionBlockItem is similar to PartitionIncludeComments but it returns +// the comments as separate token sequences so that they can be captured into +// AST attributes. It makes assumptions that apply only to block items, so +// should not be used for other constructs. +func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) { + before, within, after = it.Partition(rng) + before, leadComments = before.PartitionLeadComments() + lineComments, newline, after = after.PartitionLineEndTokens() + return +} + +func (it inputTokens) PartitionLeadComments() (before, within inputTokens) { + start := partitionLeadCommentTokens(it.nativeTokens) + before = it.Slice(0, start) + within = it.Slice(start, len(it.nativeTokens)) + return +} + +func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) { + afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens) + comments = it.Slice(0, afterComments) + newline = it.Slice(afterComments, afterNewline) + after = it.Slice(afterNewline, len(it.nativeTokens)) + return +} + +func (it inputTokens) Slice(start, end int) inputTokens { + // When we slice, we create a new slice with no additional capacity because + // we expect that these slices will be mutated in order to insert + // new code into the AST, and we want to ensure that a new underlying + // array gets allocated in that case, rather than writing into some + // following slice and corrupting it. 
+ return inputTokens{ + nativeTokens: it.nativeTokens[start:end:end], + writerTokens: it.writerTokens[start:end:end], + } +} + +func (it inputTokens) Len() int { + return len(it.nativeTokens) +} + +func (it inputTokens) Tokens() Tokens { + return it.writerTokens +} + +func (it inputTokens) Types() []hclsyntax.TokenType { + ret := make([]hclsyntax.TokenType, len(it.nativeTokens)) + for i, tok := range it.nativeTokens { + ret[i] = tok.Type + } + return ret +} + +// parseBody locates the given body within the given input tokens and returns +// the resulting *Body object as well as the tokens that appeared before and +// after it. +func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) { + before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange) + + // The main AST doesn't retain the original source ordering of the + // body items, so we need to reconstruct that ordering by inspecting + // their source ranges. + nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) + for _, nativeAttr := range nativeBody.Attributes { + nativeItems = append(nativeItems, nativeAttr) + } + for _, nativeBlock := range nativeBody.Blocks { + nativeItems = append(nativeItems, nativeBlock) + } + sort.Sort(nativeNodeSorter{nativeItems}) + + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + + remain := within + for _, nativeItem := range nativeItems { + beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) + + if beforeItem.Len() > 0 { + body.AppendUnstructuredTokens(beforeItem.Tokens()) + } + body.appendItemNode(item) + + remain = afterItem + } + + if remain.Len() > 0 { + body.AppendUnstructuredTokens(remain.Tokens()) + } + + return before, newNode(body), after +} + +func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { + before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) + + var item *node + + switch tItem := nativeItem.(type) { + case *hclsyntax.Attribute: + item = parseAttribute(tItem, within, leadComments, lineComments, newline) + case *hclsyntax.Block: + item = parseBlock(tItem, within, leadComments, lineComments, newline) + default: + // should never happen if caller is behaving + panic("unsupported native item type") + } + + return before, item, after +} + +func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { + attr := &Attribute{ + inTree: newInTree(), + } + children := attr.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + attr.leadComments = cn + children.AppendNode(cn) + } + + before, nameTokens, from := from.Partition(nativeAttr.NameRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if nameTokens.Len() != 1 { + // Should never happen with valid input + panic("attribute name is not exactly one token") + } + token := nameTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + attr.name = in + children.AppendNode(in) + } + + before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(equalsTokens.Tokens()) + + before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) + { + children.AppendUnstructuredTokens(before.Tokens()) + exprNode := parseExpression(nativeAttr.Expr, exprTokens) + attr.expr = exprNode + children.AppendNode(exprNode) + } + + { + cn := 
newNode(newComments(lineComments.Tokens())) + attr.lineComments = cn + children.AppendNode(cn) + } + + children.AppendUnstructuredTokens(newline.Tokens()) + + // Collect any stragglers, though there shouldn't be any + children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(attr) +} + +func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { + block := &Block{ + inTree: newInTree(), + labels: newNodeSet(), + } + children := block.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + block.leadComments = cn + children.AppendNode(cn) + } + + before, typeTokens, from := from.Partition(nativeBlock.TypeRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if typeTokens.Len() != 1 { + // Should never happen with valid input + panic("block type name is not exactly one token") + } + token := typeTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + block.typeName = in + children.AppendNode(in) + } + + for _, rng := range nativeBlock.LabelRanges { + var labelTokens inputTokens + before, labelTokens, from = from.Partition(rng) + children.AppendUnstructuredTokens(before.Tokens()) + tokens := labelTokens.Tokens() + var ln *node + if len(tokens) == 1 && tokens[0].Type == hclsyntax.TokenIdent { + ln = newNode(newIdentifier(tokens[0])) + } else { + ln = newNode(newQuoted(tokens)) + } + block.labels.Add(ln) + children.AppendNode(ln) + } + + before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(oBrace.Tokens()) + + // We go a bit out of order here: we go hunting for the closing brace + // so that we have a delimited body, but then we'll deal with the body + // before we actually append the closing brace and any straggling tokens + // that appear after it. + bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) + before, body, after := parseBody(nativeBlock.Body, bodyTokens) + children.AppendUnstructuredTokens(before.Tokens()) + block.body = body + children.AppendNode(body) + children.AppendUnstructuredTokens(after.Tokens()) + + children.AppendUnstructuredTokens(cBrace.Tokens()) + + // stragglers + children.AppendUnstructuredTokens(from.Tokens()) + if lineComments.Len() > 0 { + // blocks don't actually have line comments, so we'll just treat + // them as extra stragglers + children.AppendUnstructuredTokens(lineComments.Tokens()) + } + children.AppendUnstructuredTokens(newline.Tokens()) + + return newNode(block) +} + +func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { + expr := newExpression() + children := expr.inTree.children + + nativeVars := nativeExpr.Variables() + + for _, nativeTraversal := range nativeVars { + before, traversal, after := parseTraversal(nativeTraversal, from) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(traversal) + expr.absTraversals.Add(traversal) + from = after + } + // Attach any stragglers that don't belong to a traversal to the expression + // itself. In an expression with no traversals at all, this is just the + // entirety of "from". 
+ children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(expr) +} + +func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { + traversal := newTraversal() + children := traversal.inTree.children + before, from, after = from.Partition(nativeTraversal.SourceRange()) + + stepAfter := from + for _, nativeStep := range nativeTraversal { + before, step, after := parseTraversalStep(nativeStep, stepAfter) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(step) + traversal.steps.Add(step) + stepAfter = after + } + + return before, newNode(traversal), after +} + +func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { + var children *nodes + switch tNativeStep := nativeStep.(type) { + + case hcl.TraverseRoot, hcl.TraverseAttr: + step := newTraverseName() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) + name := newIdentifier(token) + children.AppendUnstructuredTokens(inBefore.Tokens()) + step.name = children.Append(name) + children.AppendUnstructuredTokens(inAfter.Tokens()) + return before, newNode(step), after + + case hcl.TraverseIndex: + step := newTraverseIndex() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + + var inBefore, oBrack, keyTokens, cBrack inputTokens + inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) + children.AppendUnstructuredTokens(inBefore.Tokens()) + children.AppendUnstructuredTokens(oBrack.Tokens()) + keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) + + keyVal := tNativeStep.Key + switch keyVal.Type() { + case cty.String: + key := newQuoted(keyTokens.Tokens()) + step.key = children.Append(key) + case cty.Number: + valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) + children.AppendUnstructuredTokens(valBefore.Tokens()) + key := newNumber(valToken) + step.key = children.Append(key) + children.AppendUnstructuredTokens(valAfter.Tokens()) + } + + children.AppendUnstructuredTokens(cBrack.Tokens()) + children.AppendUnstructuredTokens(from.Tokens()) + + return before, newNode(step), after + default: + panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) + } + +} + +// writerTokens takes a sequence of tokens as produced by the main hclsyntax +// package and transforms it into an equivalent sequence of tokens using +// this package's own token model. +// +// The resulting list contains the same number of tokens and uses the same +// indices as the input, allowing the two sets of tokens to be correlated +// by index. +func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { + // Ultimately we want a slice of token _pointers_, but since we can + // predict how much memory we're going to devote to tokens we'll allocate + // it all as a single flat buffer and thus give the GC less work to do. + tokBuf := make([]Token, len(nativeTokens)) + var lastByteOffset int + for i, mainToken := range nativeTokens { + // Create a copy of the bytes so that we can mutate without + // corrupting the original token stream. 
+ bytes := make([]byte, len(mainToken.Bytes)) + copy(bytes, mainToken.Bytes) + + tokBuf[i] = Token{ + Type: mainToken.Type, + Bytes: bytes, + + // We assume here that spaces are always ASCII spaces, since + // that's what the scanner also assumes, and thus the number + // of bytes skipped is also the number of space characters. + SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset, + } + + lastByteOffset = mainToken.Range.End.Byte + } + + // Now make a slice of pointers into the previous slice. + ret := make(Tokens, len(tokBuf)) + for i := range ret { + ret[i] = &tokBuf[i] + } + + return ret +} + +// partitionTokens takes a sequence of tokens and a hcl.Range and returns +// two indices within the token sequence that correspond with the range +// boundaries, such that the slice operator could be used to produce +// three token sequences for before, within, and after respectively: +// +// start, end := partitionTokens(toks, rng) +// before := toks[:start] +// within := toks[start:end] +// after := toks[end:] +// +// This works best when the range is aligned with token boundaries (e.g. +// because it was produced in terms of the scanner's result) but if that isn't +// true then it will make a best effort that may produce strange results at +// the boundaries. +// +// Native hclsyntax tokens are used here, because they contain the necessary +// absolute position information. However, since writerTokens produces a +// correlatable sequence of writer tokens, the resulting indices can be +// used also to index into its result, allowing the partitioning of writer +// tokens to be driven by the partitioning of native tokens. +// +// The tokens are assumed to be in source order and non-overlapping, which +// will be true if the token sequence from the scanner is used directly. +func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) { + // We us a linear search here because we assume tha in most cases our + // target range is close to the beginning of the sequence, and the seqences + // are generally small for most reasonable files anyway. + for i := 0; ; i++ { + if i >= len(toks) { + // No tokens for the given range at all! + return len(toks), len(toks) + } + + if toks[i].Range.Start.Byte >= rng.Start.Byte { + start = i + break + } + } + + for i := start; ; i++ { + if i >= len(toks) { + // The range "hangs off" the end of the token sequence + return start, len(toks) + } + + if toks[i].Range.Start.Byte >= rng.End.Byte { + end = i // end marker is exclusive + break + } + } + + return start, end +} + +// partitionLeadCommentTokens takes a sequence of tokens that is assumed +// to immediately precede a construct that can have lead comment tokens, +// and returns the index into that sequence where the lead comments begin. +// +// Lead comments are defined as whole lines containing only comment tokens +// with no blank lines between. If no such lines are found, the returned +// index will be len(toks). +func partitionLeadCommentTokens(toks hclsyntax.Tokens) int { + // single-line comments (which is what we're interested in here) + // consume their trailing newline, so we can just walk backwards + // until we stop seeing comment tokens. 
+ for i := len(toks) - 1; i >= 0; i-- { + if toks[i].Type != hclsyntax.TokenComment { + return i + 1 + } + } + return 0 +} + +// partitionLineEndTokens takes a sequence of tokens that is assumed +// to immediately follow a construct that can have a line comment, and +// returns first the index where any line comments end and then second +// the index immediately after the trailing newline. +// +// Line comments are defined as comments that appear immediately after +// a construct on the same line where its significant tokens ended. +// +// Since single-line comment tokens (# and //) include the newline that +// terminates them, in the presence of these the two returned indices +// will be the same since the comment itself serves as the line end. +func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) { + for i := 0; i < len(toks); i++ { + tok := toks[i] + if tok.Type != hclsyntax.TokenComment { + switch tok.Type { + case hclsyntax.TokenNewline: + return i, i + 1 + case hclsyntax.TokenEOF: + // Although this is valid, we mustn't include the EOF + // itself as our "newline" or else strange things will + // happen when we try to append new items. + return i, i + default: + // If we have well-formed input here then nothing else should be + // possible. This path should never happen, because we only try + // to extract tokens from the sequence if the parser succeeded, + // and it should catch this problem itself. + panic("malformed line trailers: expected only comments and newlines") + } + } + + if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' { + // Newline at the end of a single-line comment serves both as + // the end of comments *and* the end of the line. + return i + 1, i + 1 + } + } + return len(toks), len(toks) +} + +// lexConfig uses the hclsyntax scanner to get a token stream and then +// rewrites it into this package's token model. +// +// Any errors produced during scanning are ignored, so the results of this +// function should be used with care. +func lexConfig(src []byte) Tokens { + mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1}) + return writerTokens(mainTokens) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go new file mode 100644 index 000000000..678a3aa45 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/public.go @@ -0,0 +1,44 @@ +package hclwrite + +import ( + "bytes" + + "github.com/hashicorp/hcl/v2" +) + +// NewFile creates a new file object that is empty and ready to have constructs +// added t it. +func NewFile() *File { + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + file := &File{ + inTree: newInTree(), + } + file.body = file.inTree.children.Append(body) + return file +} + +// ParseConfig interprets the given source bytes into a *hclwrite.File. The +// resulting AST can be used to perform surgical edits on the source code +// before turning it back into bytes again. +func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) { + return parse(src, filename, start) +} + +// Format takes source code and performs simple whitespace changes to transform +// it to a canonical layout style. +// +// Format skips constructing an AST and works directly with tokens, so it +// is less expensive than formatting via the AST for situations where no other +// changes will be made. 
It also ignores syntax errors and can thus be applied +// to partial source code, although the result in that case may not be +// desirable. +func Format(src []byte) []byte { + tokens := lexConfig(src) + format(tokens) + buf := &bytes.Buffer{} + tokens.WriteTo(buf) + return buf.Bytes() +} diff --git a/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go new file mode 100644 index 000000000..3bd3e5f48 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/hclwrite/tokens.go @@ -0,0 +1,122 @@ +package hclwrite + +import ( + "bytes" + "io" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// Token is a single sequence of bytes annotated with a type. It is similar +// in purpose to hclsyntax.Token, but discards the source position information +// since that is not useful in code generation. +type Token struct { + Type hclsyntax.TokenType + Bytes []byte + + // We record the number of spaces before each token so that we can + // reproduce the exact layout of the original file when we're making + // surgical changes in-place. When _new_ code is created it will always + // be in the canonical style, but we preserve layout of existing code. + SpacesBefore int +} + +// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token. +// A complete token is not possible since we don't have source location +// information here, and so this method is unexported so we can be sure it will +// only be used for internal purposes where we know the range isn't important. +// +// This is primarily intended to allow us to re-use certain functionality from +// hclsyntax rather than re-implementing it against our own token type here. +func (t *Token) asHCLSyntax() hclsyntax.Token { + return hclsyntax.Token{ + Type: t.Type, + Bytes: t.Bytes, + Range: hcl.Range{ + Filename: "", + }, + } +} + +// Tokens is a flat list of tokens. +type Tokens []*Token + +func (ts Tokens) Bytes() []byte { + buf := &bytes.Buffer{} + ts.WriteTo(buf) + return buf.Bytes() +} + +func (ts Tokens) testValue() string { + return string(ts.Bytes()) +} + +// Columns returns the number of columns (grapheme clusters) the token sequence +// occupies. The result is not meaningful if there are newline or single-line +// comment tokens in the sequence. +func (ts Tokens) Columns() int { + ret := 0 + for _, token := range ts { + ret += token.SpacesBefore // spaces are always worth one column each + ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters) + ret += ct + } + return ret +} + +// WriteTo takes an io.Writer and writes the bytes for each token to it, +// along with the spacing that separates each token. In other words, this +// allows serializing the tokens to a file or other such byte stream. +func (ts Tokens) WriteTo(wr io.Writer) (int64, error) { + // We know we're going to be writing a lot of small chunks of repeated + // space characters, so we'll prepare a buffer of these that we can + // easily pass to wr.Write without any further allocation. 
+ spaces := make([]byte, 40) + for i := range spaces { + spaces[i] = ' ' + } + + var n int64 + var err error + for _, token := range ts { + if err != nil { + return n, err + } + + for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) { + thisChunk := spacesBefore + if thisChunk > len(spaces) { + thisChunk = len(spaces) + } + var thisN int + thisN, err = wr.Write(spaces[:thisChunk]) + n += int64(thisN) + if err != nil { + return n, err + } + } + + var thisN int + thisN, err = wr.Write(token.Bytes) + n += int64(thisN) + } + + return n, err +} + +func (ts Tokens) walkChildNodes(w internalWalkFunc) { + // Unstructured tokens have no child nodes +} + +func (ts Tokens) BuildTokens(to Tokens) Tokens { + return append(to, ts...) +} + +func newIdentToken(name string) *Token { + return &Token{ + Type: hclsyntax.TokenIdent, + Bytes: []byte(name), + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/ast.go b/vendor/github.com/hashicorp/hcl/v2/json/ast.go new file mode 100644 index 000000000..9c580ca34 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/ast.go @@ -0,0 +1,121 @@ +package json + +import ( + "math/big" + + "github.com/hashicorp/hcl/v2" +) + +type node interface { + Range() hcl.Range + StartRange() hcl.Range +} + +type objectVal struct { + Attrs []*objectAttr + SrcRange hcl.Range // range of the entire object, brace-to-brace + OpenRange hcl.Range // range of the opening brace + CloseRange hcl.Range // range of the closing brace +} + +func (n *objectVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *objectVal) StartRange() hcl.Range { + return n.OpenRange +} + +type objectAttr struct { + Name string + Value node + NameRange hcl.Range // range of the name string +} + +func (n *objectAttr) Range() hcl.Range { + return n.NameRange +} + +func (n *objectAttr) StartRange() hcl.Range { + return n.NameRange +} + +type arrayVal struct { + Values []node + SrcRange hcl.Range // range of the entire object, bracket-to-bracket + OpenRange hcl.Range // range of the opening bracket +} + +func (n *arrayVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *arrayVal) StartRange() hcl.Range { + return n.OpenRange +} + +type booleanVal struct { + Value bool + SrcRange hcl.Range +} + +func (n *booleanVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *booleanVal) StartRange() hcl.Range { + return n.SrcRange +} + +type numberVal struct { + Value *big.Float + SrcRange hcl.Range +} + +func (n *numberVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *numberVal) StartRange() hcl.Range { + return n.SrcRange +} + +type stringVal struct { + Value string + SrcRange hcl.Range +} + +func (n *stringVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *stringVal) StartRange() hcl.Range { + return n.SrcRange +} + +type nullVal struct { + SrcRange hcl.Range +} + +func (n *nullVal) Range() hcl.Range { + return n.SrcRange +} + +func (n *nullVal) StartRange() hcl.Range { + return n.SrcRange +} + +// invalidVal is used as a placeholder where a value is needed for a valid +// parse tree but the input was invalid enough to prevent one from being +// created. 
+type invalidVal struct { + SrcRange hcl.Range +} + +func (n invalidVal) Range() hcl.Range { + return n.SrcRange +} + +func (n invalidVal) StartRange() hcl.Range { + return n.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go new file mode 100644 index 000000000..fbdd8bff5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/didyoumean.go @@ -0,0 +1,33 @@ +package json + +import ( + "github.com/agext/levenshtein" +) + +var keywords = []string{"false", "true", "null"} + +// keywordSuggestion tries to find a valid JSON keyword that is close to the +// given string and returns it if found. If no keyword is close enough, returns +// the empty string. +func keywordSuggestion(given string) string { + return nameSuggestion(given, keywords) +} + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/doc.go b/vendor/github.com/hashicorp/hcl/v2/json/doc.go new file mode 100644 index 000000000..84d731939 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/doc.go @@ -0,0 +1,12 @@ +// Package json is the JSON parser for HCL. It parses JSON files and returns +// implementations of the core HCL structural interfaces in terms of the +// JSON data inside. +// +// This is not a generic JSON parser. Instead, it deals with the mapping from +// the JSON information model to the HCL information model, using a number +// of hard-coded structural conventions. +// +// In most cases applications will not import this package directly, but will +// instead access its functionality indirectly through functions in the main +// "hcl" package and in the "hclparse" package. +package json diff --git a/vendor/github.com/hashicorp/hcl/v2/json/navigation.go b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go new file mode 100644 index 000000000..bc8a97f74 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/navigation.go @@ -0,0 +1,70 @@ +package json + +import ( + "fmt" + "strings" +) + +type navigation struct { + root node +} + +// Implementation of hcled.ContextString +func (n navigation) ContextString(offset int) string { + steps := navigationStepsRev(n.root, offset) + if steps == nil { + return "" + } + + // We built our slice backwards, so we'll reverse it in-place now. + half := len(steps) / 2 // integer division + for i := 0; i < half; i++ { + steps[i], steps[len(steps)-1-i] = steps[len(steps)-1-i], steps[i] + } + + ret := strings.Join(steps, "") + if len(ret) > 0 && ret[0] == '.' { + ret = ret[1:] + } + return ret +} + +func navigationStepsRev(v node, offset int) []string { + switch tv := v.(type) { + case *objectVal: + // Do any of our properties have an object that contains the target + // offset? 
+ for _, attr := range tv.Attrs { + k := attr.Name + av := attr.Value + + switch av.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if av.Range().ContainsOffset(offset) { + return append(navigationStepsRev(av, offset), "."+k) + } + } + case *arrayVal: + // Do any of our elements contain the target offset? + for i, elem := range tv.Values { + + switch elem.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if elem.Range().ContainsOffset(offset) { + return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/parser.go b/vendor/github.com/hashicorp/hcl/v2/json/parser.go new file mode 100644 index 000000000..7a54c51b6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/parser.go @@ -0,0 +1,496 @@ +package json + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{ + Filename: filename, + Pos: hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + }) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseValue(p *peeker) (node, hcl.Diagnostics) { + tok := p.Peek() + + wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { + if n != nil { + return n, diags + } + return invalidVal{tok.Range}, diags + } + + switch tok.Type { + case tokenBraceO: + return wrapInvalid(parseObject(p)) + case tokenBrackO: + return wrapInvalid(parseArray(p)) + case tokenNumber: + return wrapInvalid(parseNumber(p)) + case tokenString: + return wrapInvalid(parseString(p)) + case tokenKeyword: + return wrapInvalid(parseKeyword(p)) + case tokenBraceC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing JSON value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenBrackC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing array element value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenEOF: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing value", + Detail: "The JSON data ends prematurely.", + Subject: &tok.Range, + }, + }) + default: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid start of value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + } +} + +func tokenCanStartValue(tok token) bool { + switch tok.Type { + case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: + return true + default: + return false + } +} + +func parseObject(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + attrs := []*objectAttr{} + + // recover is used to shift the peeker to what seems to be the end of + // our object, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to 
continue parsing. + recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBraceO: + open++ + case tokenBraceC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. + return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBraceC { + break Token + } + + keyNode, keyDiags := parseValue(p) + diags = diags.Extend(keyDiags) + if keyNode == nil { + return nil, diags + } + + keyStrNode, ok := keyNode.(*stringVal) + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object property name", + Detail: "A JSON object property name must be a string", + Subject: keyNode.StartRange().Ptr(), + }) + } + + key := keyStrNode.Value + + colon := p.Read() + if colon.Type != tokenColon { + recover(colon) + + if colon.Type == tokenBraceC || colon.Type == tokenComma { + // Catch common mistake of using braces instead of brackets + // for an object. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing object value", + Detail: "A JSON object attribute must have a value, introduced by a colon.", + Subject: &colon.Range, + }) + } + + if colon.Type == tokenEquals { + // Possible confusion with native HCL syntax. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", + Subject: &colon.Range, + }) + } + + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "A colon must appear between an object property's name and its value.", + Subject: &colon.Range, + }) + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + attrs = append(attrs, &objectAttr{ + Name: key, + Value: valNode, + NameRange: keyStrNode.SrcRange, + }) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBraceC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in object", + Detail: "JSON does not permit a trailing comma after the final property in an object.", + Subject: &comma.Range, + }) + } + continue Token + case tokenEOF: + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing brace was found for this JSON object.", + Subject: &open.Range, + }) + case tokenBrackC: + // Consume the bracket anyway, so that we don't return with the peeker + // at a strange place. 
+ p.Read() + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Mismatched braces", + Detail: "A JSON object must be closed with a brace, not a bracket.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenBraceC: + break Token + default: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute seperator comma", + Detail: "A comma must appear between each property definition in an object.", + Subject: p.Peek().Range.Ptr(), + }) + } + + } + + close := p.Read() + return &objectVal{ + Attrs: attrs, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +func parseArray(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + vals := []node{} + + // recover is used to shift the peeker to what seems to be the end of + // our array, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. + recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBrackO: + open++ + case tokenBrackC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. + return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBrackC { + break Token + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + vals = append(vals, valNode) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBrackC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in array", + Detail: "JSON does not permit a trailing comma after the final value in an array.", + Subject: &comma.Range, + }) + } + continue Token + case tokenColon: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid array value", + Detail: "A colon is not used to introduce values in a JSON array.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenEOF: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing bracket was found for this JSON array.", + Subject: &open.Range, + }) + case tokenBraceC: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Mismatched brackets", + Detail: "A JSON array must be closed with a bracket, not a brace.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenBrackC: + break Token + default: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute seperator comma", + Detail: "A comma must appear between each value in an array.", + Subject: p.Peek().Range.Ptr(), + }) + } + + } + + close := p.Read() + return &arrayVal{ + Values: vals, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func parseNumber(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + + // Use encoding/json to validate the number syntax. + // TODO: Do this more directly to produce better diagnostics. 
+ var num json.Number + err := json.Unmarshal(tok.Bytes, &num) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON number", + Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), + Subject: &tok.Range, + }, + } + } + + // We want to guarantee that we parse numbers the same way as cty (and thus + // native syntax HCL) would here, so we'll use the cty parser even though + // in most other cases we don't actually introduce cty concepts until + // decoding time. We'll unwrap the parsed float immediately afterwards, so + // the cty value is just a temporary helper. + nv, err := cty.ParseNumberVal(string(num)) + if err != nil { + // Should never happen if above passed, since JSON numbers are a subset + // of what cty can parse... + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON number", + Detail: fmt.Sprintf("There is a syntax error in the given JSON number."), + Subject: &tok.Range, + }, + } + } + + return &numberVal{ + Value: nv.AsBigFloat(), + SrcRange: tok.Range, + }, nil +} + +func parseString(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + var str string + err := json.Unmarshal(tok.Bytes, &str) + + if err != nil { + var errRange hcl.Range + if serr, ok := err.(*json.SyntaxError); ok { + errOfs := serr.Offset + errPos := tok.Range.Start + errPos.Byte += int(errOfs) + + // TODO: Use the byte offset to properly count unicode + // characters for the column, and mark the whole of the + // character that was wrong as part of our range. + errPos.Column += int(errOfs) + + errEndPos := errPos + errEndPos.Byte++ + errEndPos.Column++ + + errRange = hcl.Range{ + Filename: tok.Range.Filename, + Start: errPos, + End: errEndPos, + } + } else { + errRange = tok.Range + } + + var contextRange *hcl.Range + if errRange != tok.Range { + contextRange = &tok.Range + } + + // FIXME: Eventually we should parse strings directly here so + // we can produce a more useful error message in the face fo things + // such as invalid escapes, etc. 
+ return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON string", + Detail: fmt.Sprintf("There is a syntax error in the given JSON string."), + Subject: &errRange, + Context: contextRange, + }, + } + } + + return &stringVal{ + Value: str, + SrcRange: tok.Range, + }, nil +} + +func parseKeyword(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + s := string(tok.Bytes) + + switch s { + case "true": + return &booleanVal{ + Value: true, + SrcRange: tok.Range, + }, nil + case "false": + return &booleanVal{ + Value: false, + SrcRange: tok.Range, + }, nil + case "null": + return &nullVal{ + SrcRange: tok.Range, + }, nil + case "undefined", "NaN", "Infinity": + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), + Subject: &tok.Range, + }, + } + default: + var dym string + if suggest := keywordSuggestion(s); suggest != "" { + dym = fmt.Sprintf(" Did you mean %q?", suggest) + } + + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), + Subject: &tok.Range, + }, + } + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/peeker.go b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go new file mode 100644 index 000000000..fc7bbf582 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/peeker.go @@ -0,0 +1,25 @@ +package json + +type peeker struct { + tokens []token + pos int +} + +func newPeeker(tokens []token) *peeker { + return &peeker{ + tokens: tokens, + pos: 0, + } +} + +func (p *peeker) Peek() token { + return p.tokens[p.pos] +} + +func (p *peeker) Read() token { + ret := p.tokens[p.pos] + if ret.Type != tokenEOF { + p.pos++ + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/public.go b/vendor/github.com/hashicorp/hcl/v2/json/public.go new file mode 100644 index 000000000..8dc4a36af --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/public.go @@ -0,0 +1,94 @@ +package json + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/hcl/v2" +) + +// Parse attempts to parse the given buffer as JSON and, if successful, returns +// a hcl.File for the HCL configuration represented by it. +// +// This is not a generic JSON parser. Instead, it deals only with the profile +// of JSON used to express HCL configuration. +// +// The returned file is valid only if the returned diagnostics returns false +// from its HasErrors method. If HasErrors returns true, the file represents +// the subset of data that was able to be parsed, which may be none. +func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + rootNode, diags := parseFileContent(src, filename) + + switch rootNode.(type) { + case *objectVal, *arrayVal: + // okay + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Root value must be object", + Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", + Subject: rootNode.StartRange().Ptr(), + }) + + // Since we've already produced an error message for this being + // invalid, we'll return an empty placeholder here so that trying to + // extract content from our root body won't produce a redundant + // error saying the same thing again in more general terms. 
+ fakePos := hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + } + fakeRange := hcl.Range{ + Filename: filename, + Start: fakePos, + End: fakePos, + } + rootNode = &objectVal{ + Attrs: []*objectAttr{}, + SrcRange: fakeRange, + OpenRange: fakeRange, + } + } + + file := &hcl.File{ + Body: &body{ + val: rootNode, + }, + Bytes: src, + Nav: navigation{rootNode}, + } + return file, diags +} + +// ParseFile is a convenience wrapper around Parse that first attempts to load +// data from the given filename, passing the result to Parse if successful. +// +// If the file cannot be read, an error diagnostic with nil context is returned. +func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) { + f, err := os.Open(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to open file", + Detail: fmt.Sprintf("The file %q could not be opened.", filename), + }, + } + } + defer f.Close() + + src, err := ioutil.ReadAll(f) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The file %q was opened, but an error occured while reading it.", filename), + }, + } + } + + return Parse(src, filename) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/scanner.go b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go new file mode 100644 index 000000000..912eabce2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/scanner.go @@ -0,0 +1,297 @@ +package json + +import ( + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl/v2" +) + +//go:generate stringer -type tokenType scanner.go +type tokenType rune + +const ( + tokenBraceO tokenType = '{' + tokenBraceC tokenType = '}' + tokenBrackO tokenType = '[' + tokenBrackC tokenType = ']' + tokenComma tokenType = ',' + tokenColon tokenType = ':' + tokenKeyword tokenType = 'K' + tokenString tokenType = 'S' + tokenNumber tokenType = 'N' + tokenEOF tokenType = '␄' + tokenInvalid tokenType = 0 + tokenEquals tokenType = '=' // used only for reminding the user of JSON syntax +) + +type token struct { + Type tokenType + Bytes []byte + Range hcl.Range +} + +// scan returns the primary tokens for the given JSON buffer in sequence. +// +// The responsibility of this pass is to just mark the slices of the buffer +// as being of various types. It is lax in how it interprets the multi-byte +// token types keyword, string and number, preferring to capture erroneous +// extra bytes that we presume the user intended to be part of the token +// so that we can generate more helpful diagnostics in the parser. 
+func scan(buf []byte, start pos) []token { + var tokens []token + p := start + for { + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + buf, p = skipWhitespace(buf, p) + + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + start = p + + first := buf[0] + switch { + case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': + p.Pos.Column++ + p.Pos.Byte++ + tokens = append(tokens, token{ + Type: tokenType(first), + Bytes: buf[0:1], + Range: posRange(start, p), + }) + buf = buf[1:] + case first == '"': + var tokBuf []byte + tokBuf, buf, p = scanString(buf, p) + tokens = append(tokens, token{ + Type: tokenString, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartNumber(first): + var tokBuf []byte + tokBuf, buf, p = scanNumber(buf, p) + tokens = append(tokens, token{ + Type: tokenNumber, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartKeyword(first): + var tokBuf []byte + tokBuf, buf, p = scanKeyword(buf, p) + tokens = append(tokens, token{ + Type: tokenKeyword, + Bytes: tokBuf, + Range: posRange(start, p), + }) + default: + tokens = append(tokens, token{ + Type: tokenInvalid, + Bytes: buf[:1], + Range: start.Range(1, 1), + }) + // If we've encountered an invalid then we might as well stop + // scanning since the parser won't proceed beyond this point. + return tokens + } + } +} + +func byteCanStartNumber(b byte) bool { + switch b { + // We are slightly more tolerant than JSON requires here since we + // expect the parser will make a stricter interpretation of the + // number bytes, but we specifically don't allow 'e' or 'E' here + // since we want the scanner to treat that as the start of an + // invalid keyword instead, to produce more intelligible error messages. + case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + default: + return false + } +} + +func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't check that the sequence of digit-ish bytes is + // in a valid order. The parser must do this when decoding a number + // token. + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func byteCanStartKeyword(b byte) bool { + switch { + // We allow any sequence of alphabetical characters here, even though + // JSON is more constrained, so that we can collect what we presume + // the user intended to be a single keyword and then check its validity + // in the parser, where we can generate better diagnostics. + // So e.g. we want to be able to say: + // unrecognized keyword "True". Did you mean "true"? + case isAlphabetical(b): + return true + default: + return false + } +} + +func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + b := buf[i] + switch { + case isAlphabetical(b) || b == '_': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func scanString(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't validate correct use of escapes, etc. 
It pays + // attention to escapes only for the purpose of identifying the closing + // quote character. It's the parser's responsibility to do proper + // validation. + // + // The scanner also doesn't specifically detect unterminated string + // literals, though they can be identified in the parser by checking if + // the final byte in a string token is the double-quote character. + + // Skip the opening quote symbol + i := 1 + p := start + p.Pos.Byte++ + p.Pos.Column++ + escaping := false +Byte: + for i < len(buf) { + b := buf[i] + + switch { + case b == '\\': + escaping = !escaping + p.Pos.Byte++ + p.Pos.Column++ + i++ + case b == '"': + p.Pos.Byte++ + p.Pos.Column++ + i++ + if !escaping { + break Byte + } + escaping = false + case b < 32: + break Byte + default: + // Advance by one grapheme cluster, so that we consider each + // grapheme to be a "column". + // Ignoring error because this scanner cannot produce errors. + advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) + + p.Pos.Byte += advance + p.Pos.Column++ + i += advance + + escaping = false + } + } + return buf[:i], buf[i:], p +} + +func skipWhitespace(buf []byte, start pos) ([]byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case ' ': + p.Pos.Byte++ + p.Pos.Column++ + case '\n': + p.Pos.Byte++ + p.Pos.Column = 1 + p.Pos.Line++ + case '\r': + // For the purpose of line/column counting we consider a + // carriage return to take up no space, assuming that it will + // be paired up with a newline (on Windows, for example) that + // will account for both of them. + p.Pos.Byte++ + case '\t': + // We arbitrarily count a tab as if it were two spaces, because + // we need to choose _some_ number here. This means any system + // that renders code on-screen with markers must itself treat + // tabs as a pair of spaces for rendering purposes, or instead + // use the byte offset and back into its own column position. + p.Pos.Byte++ + p.Pos.Column += 2 + default: + break Byte + } + } + return buf[i:], p +} + +type pos struct { + Filename string + Pos hcl.Pos +} + +func (p *pos) Range(byteLen, charLen int) hcl.Range { + start := p.Pos + end := p.Pos + end.Byte += byteLen + end.Column += charLen + return hcl.Range{ + Filename: p.Filename, + Start: start, + End: end, + } +} + +func posRange(start, end pos) hcl.Range { + return hcl.Range{ + Filename: start.Filename, + Start: start.Pos, + End: end.Pos, + } +} + +func (t token) GoString() string { + return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) +} + +func isAlphabetical(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/spec.md b/vendor/github.com/hashicorp/hcl/v2/json/spec.md new file mode 100644 index 000000000..dac5729d4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/spec.md @@ -0,0 +1,405 @@ +# HCL JSON Syntax Specification + +This is the specification for the JSON serialization for hcl. HCL is a system +for defining configuration languages for applications. The HCL information +model is designed to support multiple concrete syntaxes for configuration, +and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) +by being easy to machine-generate, whereas the native syntax is oriented +towards human authoring and maintenance + +This syntax is defined in terms of JSON as defined in +[RFC7159](https://tools.ietf.org/html/rfc7159). 
As such it inherits the JSON
+grammar as-is, and merely defines a specific methodology for interpreting
+JSON constructs into HCL structural elements and expressions.
+
+This mapping is defined such that valid JSON-serialized HCL input can be
+_produced_ using standard JSON implementations in various programming languages.
+_Parsing_ such JSON has some additional constraints beyond what is normally
+supported by JSON parsers, so a specialized parser may be required that
+is able to:
+
+- Preserve the relative ordering of properties defined in an object.
+- Preserve multiple definitions of the same property name.
+- Preserve numeric values to the precision required by the number type
+  in [the HCL syntax-agnostic information model](../spec.md).
+- Retain source location information for parsed tokens/constructs in order
+  to produce good error messages.
+
+## Structural Elements
+
+[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
+abstract container for attribute definitions and child blocks. A body is
+represented in JSON as either a single JSON object or a JSON array of objects.
+
+Body processing is in terms of JSON object properties, visited in the order
+they appear in the input. Where a body is represented by a single JSON object,
+the properties of that object are visited in order. Where a body is
+represented by a JSON array, each of its elements is visited in order and
+each element has its properties visited in order. If any element of the array
+is not a JSON object then the input is erroneous.
+
+When a body is being processed in the _dynamic attributes_ mode, the allowance
+of a JSON array in the previous paragraph does not apply and instead a single
+JSON object is always required.
+
+As defined in the language-agnostic model, body processing is in terms
+of a schema which provides context for interpreting the body's content. For
+JSON bodies, the schema is crucial to allow differentiation of attribute
+definitions and block definitions, both of which are represented via object
+properties.
+
+The special property name `"//"`, when used in an object representing a HCL
+body, is parsed and ignored. A property with this name can be used to
+include human-readable comments. (This special property name is _not_
+processed in this way for any _other_ HCL constructs that are represented as
+JSON objects.)
+
+### Attributes
+
+Where the given schema describes an attribute with a given name, the object
+property with the matching name — if present — serves as the attribute's
+definition.
+
+When a body is being processed in the _dynamic attributes_ mode, each object
+property serves as an attribute definition for the attribute whose name
+matches the property name.
+
+The value of an attribute definition property is interpreted as an _expression_,
+as described in a later section.
+
+Given a schema that calls for an attribute named "foo", a JSON object like
+the following provides a definition for that attribute:
+
+```json
+{
+  "foo": "bar baz"
+}
+```
+
+### Blocks
+
+Where the given schema describes a block with a given type name, each object
+property with the matching name serves as a definition of zero or more blocks
+of that type.
+
+Processing of child blocks is in terms of nested JSON objects and arrays.
+If the schema defines one or more _labels_ for the block type, a nested JSON
+object or JSON array of objects is required for each labelling level.
These +are flattened to a single ordered sequence of object properties using the +same algorithm as for body content as defined above. Each object property +serves as a label value at the corresponding level. + +After any labelling levels, the next nested value is either a JSON object +representing a single block body, or a JSON array of JSON objects that each +represent a single block body. Use of an array accommodates the definition +of multiple blocks that have identical type and labels. + +Given a schema that calls for a block type named "foo" with no labels, the +following JSON objects are all valid definitions of zero or more blocks of this +type: + +```json +{ + "foo": { + "child_attr": "baz" + } +} +``` + +```json +{ + "foo": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] +} +``` + +```json +{ + "foo": [] +} +``` + +The first of these defines a single child block of type "foo". The second +defines _two_ such blocks. The final example shows a degenerate definition +of zero blocks, though generators should prefer to omit the property entirely +in this scenario. + +Given a schema that calls for a block type named "foo" with _two_ labels, the +extra label levels must be represented as objects or arrays of objects as in +the following examples: + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "boz": { + "baz": { + "child_attr": "baz" + } + } + } +} +``` + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "boz": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } +} +``` + +```json +{ + "foo": [ + { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + } + }, + { + "bar": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } + ] +} +``` + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "bar": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } +} +``` + +Arrays can be introduced at either the label definition or block body +definition levels to define multiple definitions of the same block type +or labels while preserving order. + +A JSON HCL parser _must_ support duplicate definitions of the same property +name within a single object, preserving all of them and the relative ordering +between them. The array-based forms are also required so that JSON HCL +configurations can be produced with JSON producing libraries that are not +able to preserve property definition order and multiple definitions of +the same property. + +## Expressions + +JSON lacks a native expression syntax, so the HCL JSON syntax instead defines +a mapping for each of the JSON value types, including a special mapping for +strings that allows optional use of arbitrary expressions. + +### Objects + +When interpreted as an expression, a JSON object represents a value of a HCL +object type. + +Each property of the JSON object represents an attribute of the HCL object type. +The property name string given in the JSON input is interpreted as a string +expression as described below, and its result is converted to string as defined +by the syntax-agnostic information model. If such a conversion is not possible, +an error is produced and evaluation fails. 
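+
+For example, the following object expression (an illustrative sketch only;
+`env` is assumed to be a variable available in the evaluation scope) defines
+one attribute with a literal name and, in full expression mode, one whose
+name is itself produced by a template:
+
+```json
+{
+  "enabled": true,
+  "${env}_name": "example"
+}
+```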
+
+An instance of the constructed object type is then created, whose values
+are interpreted by again recursively applying the mapping rules defined in
+this section to each of the property values.
+
+If any evaluated property name strings produce null values, an error is
+produced and evaluation fails. If any produce _unknown_ values, the _entire
+object's_ result is an unknown value of the dynamic pseudo-type, signalling
+that the type of the object cannot be determined.
+
+It is an error to define the same property name multiple times within a single
+JSON object interpreted as an expression. In full expression mode, this
+constraint applies to the name expression results after conversion to string,
+rather than the raw string that may contain interpolation expressions.
+
+### Arrays
+
+When interpreted as an expression, a JSON array represents a value of a HCL
+tuple type.
+
+Each element of the JSON array represents an element of the HCL tuple type.
+The tuple type is constructed by enumerating the JSON array elements, creating
+for each an element whose type is the result of recursively applying the
+expression mapping rules. Correspondence is preserved between the array element
+indices and the tuple element indices.
+
+An instance of the constructed tuple type is then created, whose values are
+interpreted by again recursively applying the mapping rules defined in this
+section.
+
+### Numbers
+
+When interpreted as an expression, a JSON number represents a HCL number value.
+
+HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
+be able to translate exactly the value given to a number of corresponding
+precision, within the constraints set by the HCL syntax-agnostic information
+model.
+
+In practice, off-the-shelf JSON serializers often do not support customizing the
+processing of numbers, and instead force processing as 32-bit or 64-bit
+floating point values.
+
+A _producer_ of JSON HCL that uses such a serializer can provide numeric values
+as JSON strings where they have precision too great for representation in the
+serializer's chosen numeric type in situations where the result will be
+converted to number (using the standard conversion rules) by a calling
+application.
+
+Alternatively, for expressions that are evaluated in full expression mode an
+embedded template interpolation can be used to faithfully represent a number,
+such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
+syntax expression evaluator.
+
+### Boolean Values
+
+The JSON boolean values `true` and `false`, when interpreted as expressions,
+represent the corresponding HCL boolean values.
+
+### The Null Value
+
+The JSON value `null`, when interpreted as an expression, represents a
+HCL null value of the dynamic pseudo-type.
+
+### Strings
+
+When interpreted as an expression, a JSON string may be interpreted in one of
+two ways depending on the evaluation mode.
+
+If evaluating in literal-only mode (as defined by the syntax-agnostic
+information model) the literal string is interpreted directly as a HCL string
+value, by directly using the exact sequence of unicode characters represented.
+Template interpolations and directives MUST NOT be processed in this mode,
+allowing any characters that appear as introduction sequences to pass through
+literally:
+
+```json
+"Hello world! Template sequences like ${ are not interpreted here."
+```
+
+When evaluating in full expression mode (again, as defined by the syntax-
+agnostic information model) the literal string is instead interpreted as a
+_standalone template_ in the HCL Native Syntax. The expression evaluation
+result is then the direct result of evaluating that template with the current
+variable scope and function table.
+
+```json
+"Hello, ${name}! Template sequences are interpreted in full expression mode."
+```
+
+In particular the _Template Interpolation Unwrapping_ requirement from the
+HCL native syntax specification must be implemented, allowing the use of
+single-interpolation templates to represent expressions that would not
+otherwise be representable in JSON, such as the following example where
+the result must be a number, rather than a string representation of a number:
+
+```json
+"${ a + b }"
+```
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for JSON values that
+represent expressions, as described in the following sections.
+
+Due to the limited expressive power of the JSON syntax alone, the use of these
+static analysis functions, rather than normal expression evaluation, provides
+additional context for how a JSON value is to be interpreted, which means
+that static analyses can result in a different interpretation of a given
+expression than normal evaluation.
+
+### Static List
+
+An expression interpreted as a static list must be a JSON array. Each of the
+values in the array is interpreted as an expression and returned.
+
+### Static Map
+
+An expression interpreted as a static map must be a JSON object. Each of the
+key/value pairs in the object is presented as a pair of expressions. Since
+object property names are always strings, evaluating the key expression with
+a non-`nil` evaluation context will evaluate any template sequences given
+in the property name.
+
+### Static Call
+
+An expression interpreted as a static call must be a string. The content of
+the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then the static call analysis is delegated to
+that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static call analysis is not supported.
+
+### Static Traversal
+
+An expression interpreted as a static traversal must be a string. The content
+of the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then static traversal analysis is delegated
+to that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static traversal analysis is not supported.
diff --git a/vendor/github.com/hashicorp/hcl/v2/json/structure.go b/vendor/github.com/hashicorp/hcl/v2/json/structure.go
new file mode 100644
index 000000000..76c9d7399
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/v2/json/structure.go
@@ -0,0 +1,637 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// body is the implementation of "Body" used for files processed with the JSON
+// parser.
+type body struct {
+	val node
+
+	// If non-nil, the keys of this map cause the corresponding attributes to
+	// be treated as non-existing. This is used when Body.PartialContent is
+	// called, to produce the "remaining content" Body.
+ hiddenAttrs map[string]struct{} +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + for _, attr := range jsonAttrs { + k := attr.Name + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, ok := hiddenAttrs[k]; !ok { + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + // Create some more convenient data structures for our work below. + attrSchemas := map[string]hcl.AttributeSchema{} + blockSchemas := map[string]hcl.BlockHeaderSchema{} + for _, attrS := range schema.Attributes { + attrSchemas[attrS.Name] = attrS + } + for _, blockS := range schema.Blocks { + blockSchemas[blockS.Type] = blockS + } + + for _, jsonAttr := range jsonAttrs { + attrName := jsonAttr.Name + if _, used := b.hiddenAttrs[attrName]; used { + continue + } + + if attrS, defined := attrSchemas[attrName]; defined { + if existing, exists := content.Attributes[attrName]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), + Subject: &jsonAttr.NameRange, + Context: jsonAttr.Range().Ptr(), + }) + continue + } + + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrName] = struct{}{} + + } else if blockS, defined := blockSchemas[attrName]; defined { + bv := jsonAttr.Value + blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) + diags = append(diags, blockDiags...) 
+ usedNames[attrName] = struct{}{} + } + + // We ignore anything that isn't defined because that's the + // PartialContent contract. The Content method will catch leftovers. + } + + // Make sure we got all the required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + if _, defined := content.Attributes[attrS.Name]; !defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + } + + unusedBody := &body{ + val: b.val, + hiddenAttrs: usedNames, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + attrs := make(map[string]*hcl.Attribute) + + obj, ok := b.val.(*objectVal) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, setting the arguments for this block.", + Subject: b.val.StartRange().Ptr(), + }) + return attrs, diags + } + + for _, jsonAttr := range obj.Attrs { + name := jsonAttr.Name + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + + if existing, exists := attrs[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), + Subject: &jsonAttr.NameRange, + }) + continue + } + + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. + return attrs, diags +} + +func (b *body) MissingItemRange() hcl.Range { + switch tv := b.val.(type) { + case *objectVal: + return tv.CloseRange + case *arrayVal: + return tv.OpenRange + default: + // Should not happen in correct operation, but might show up if the + // input is invalid and we are producing partial results. + return tv.StartRange() + } +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) + diags = append(diags, attrDiags...) 
+ + if len(jsonAttrs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing block label", + Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + labelsUsed := append(labelsUsed, "") + labelRanges := append(labelRanges, hcl.Range{}) + for _, p := range jsonAttrs { + pk := p.Name + labelsUsed[len(labelsUsed)-1] = pk + labelRanges[len(labelRanges)-1] = p.NameRange + diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) + } + return + } + + // By the time we get here, we've peeled off all the labels and we're ready + // to deal with the block's actual content. + + // need to copy the label slices because their underlying arrays will + // continue to be mutated after we return. + labels := make([]string, len(labelsUsed)) + copy(labels, labelsUsed) + labelR := make([]hcl.Range, len(labelRanges)) + copy(labelR, labelRanges) + + switch tv := v.(type) { + case *nullVal: + // There is no block content, e.g the value is null. + return + case *objectVal: + // Single instance of the block + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: tv, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + case *arrayVal: + // Multiple instances of the block + for _, av := range tv.Values { + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: av, // might be mistyped; we'll find out when content is requested for this body + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), + Subject: v.StartRange().Ptr(), + }) + } + return +} + +// collectDeepAttrs takes either a single object or an array of objects and +// flattens it into a list of object attributes, collecting attributes from +// all of the objects in a given array. +// +// Ordering is preserved, so a list of objects that each have one property +// will result in those properties being returned in the same order as the +// objects appeared in the array. +// +// This is appropriate for use only for objects representing bodies or labels +// within a block. +// +// The labelName argument, if non-null, is used to tailor returned error +// messages to refer to block labels rather than attributes and child blocks. +// It has no other effect. +func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { + var diags hcl.Diagnostics + var attrs []*objectAttr + + switch tv := v.(type) { + case *nullVal: + // If a value is null, then we don't return any attributes or return an error. + + case *objectVal: + attrs = append(attrs, tv.Attrs...) + + case *arrayVal: + for _, ev := range tv.Values { + switch tev := ev.(type) { + case *objectVal: + attrs = append(attrs, tev.Attrs...) 
+ default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), + Subject: ev.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, to define arguments and child blocks.", + Subject: ev.StartRange().Ptr(), + }) + } + } + } + + default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), + Subject: v.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", + Subject: v.StartRange().Ptr(), + }) + } + } + + return attrs, diags +} + +func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + switch v := e.src.(type) { + case *stringVal: + if ctx != nil { + // Parse string contents as a HCL native language expression. + // We only do this if we have a context, so passing a nil context + // is how the caller specifies that interpolations are not allowed + // and that the string should just be returned verbatim. + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, evalDiags := expr.Value(ctx) + diags = append(diags, evalDiags...) + return val, diags + } + + return cty.StringVal(v.Value), nil + case *numberVal: + return cty.NumberVal(v.Value), nil + case *booleanVal: + return cty.BoolVal(v.Value), nil + case *arrayVal: + var diags hcl.Diagnostics + vals := []cty.Value{} + for _, jsonVal := range v.Values { + val, valDiags := (&expression{src: jsonVal}).Value(ctx) + vals = append(vals, val) + diags = append(diags, valDiags...) + } + return cty.TupleVal(vals), diags + case *objectVal: + var diags hcl.Diagnostics + attrs := map[string]cty.Value{} + attrRanges := map[string]hcl.Range{} + known := true + for _, jsonAttr := range v.Attrs { + // In this one context we allow keys to contain interpolation + // expressions too, assuming we're evaluating in interpolation + // mode. This achieves parity with the native syntax where + // object expressions can have dynamic keys, while block contents + // may not. + name, nameDiags := (&expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}).Value(ctx) + valExpr := &expression{src: jsonAttr.Value} + val, valDiags := valExpr.Value(ctx) + diags = append(diags, nameDiags...) + diags = append(diags, valDiags...) 
+ + var err error + name, err = convert.Convert(name, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if name.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: "Cannot use null value as an object key.", + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if !name.IsKnown() { + // This is a bit of a weird case, since our usual rules require + // us to tolerate unknowns and just represent the result as + // best we can but if we don't know the key then we can't + // know the type of our object at all, and thus we must turn + // the whole thing into cty.DynamicVal. This is consistent with + // how this situation is handled in the native syntax. + // We'll keep iterating so we can collect other errors in + // subsequent attributes. + known = false + continue + } + nameStr := name.AsString() + if _, defined := attrs[nameStr]; defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object attribute", + Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), + Subject: &jsonAttr.NameRange, + Expression: e, + EvalContext: ctx, + }) + continue + } + attrs[nameStr] = val + attrRanges[nameStr] = jsonAttr.NameRange + } + if !known { + // We encountered an unknown key somewhere along the way, so + // we can't know what our type will eventually be. + return cty.DynamicVal, diags + } + return cty.ObjectVal(attrs), diags + case *nullVal: + return cty.NullVal(cty.DynamicPseudoType), nil + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + keyExpr := &stringVal{ // we're going to treat key as an expression in this context + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + } + vars = append(vars, (&expression{src: keyExpr}).Variables()...) + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) + } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. 
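+//
+// For example (an illustrative, hypothetical input), the JSON string
+// "aws_instance.example.id" parses via hclsyntax.ParseTraversalAbs into a
+// three-step absolute traversal.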
+func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. + + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprCall. +func (e *expression) ExprCall() *hcl.StaticCall { + // In JSON-based syntax a static call is given as a string containing + // an expression in the native syntax that also supports ExprCall. + + switch v := e.src.(type) { + case *stringVal: + expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return nil + } + + return call + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} + +// Implementation for hcl.ExprMap. +func (e *expression) ExprMap() []hcl.KeyValuePair { + switch v := e.src.(type) { + case *objectVal: + ret := make([]hcl.KeyValuePair, len(v.Attrs)) + for i, jsonAttr := range v.Attrs { + ret[i] = hcl.KeyValuePair{ + Key: &expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}, + Value: &expression{src: jsonAttr.Value}, + } + } + return ret + default: + return nil + } +} diff --git a/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go new file mode 100644 index 000000000..bbcce5b30 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. + +package json + +import "strconv" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/hashicorp/hcl/v2/merged.go b/vendor/github.com/hashicorp/hcl/v2/merged.go new file mode 100644 index 000000000..96e62a58d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. +// +// The ordering of the given files decides the order in which contained +// elements will be returned. 
If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. +func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. Applications working with merged +// bodies may wish to mark all attributes as optional and then check for +// required attributes afterwards, to produce better diagnostics. +func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) { + // the returned body will always be empty in this case, because mergedContent + // will only ever call Content on the child bodies. + content, _, diags := mb.mergedContent(schema, false) + return content, diags +} + +func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) { + return mb.mergedContent(schema, true) +} + +func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) { + attrs := make(map[string]*Attribute) + var diags Diagnostics + + for _, body := range mb { + thisAttrs, thisDiags := body.JustAttributes() + + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) 
+ } + + if thisAttrs != nil { + for name, attr := range thisAttrs { + if existing := attrs[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + + attrs[name] = attr + } + } + } + + return attrs, diags +} + +func (mb mergedBodies) MissingItemRange() Range { + if len(mb) == 0 { + // Nothing useful to return here, so we'll return some garbage. + return Range{ + Filename: "", + } + } + + // arbitrarily use the first body's missing item range + return mb[0].MissingItemRange() +} + +func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) { + // We need to produce a new schema with none of the attributes marked as + // required, since _any one_ of our bodies can contribute an attribute value. + // We'll separately check that all required attributes are present at + // the end. + mergedSchema := &BodySchema{ + Blocks: schema.Blocks, + } + for _, attrS := range schema.Attributes { + mergedAttrS := attrS + mergedAttrS.Required = false + mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS) + } + + var mergedLeftovers []Body + content := &BodyContent{ + Attributes: map[string]*Attribute{}, + } + + var diags Diagnostics + for _, body := range mb { + var thisContent *BodyContent + var thisLeftovers Body + var thisDiags Diagnostics + + if partial { + thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema) + } else { + thisContent, thisDiags = body.Content(mergedSchema) + } + + if thisLeftovers != nil { + mergedLeftovers = append(mergedLeftovers, thisLeftovers) + } + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) + } + + if thisContent.Attributes != nil { + for name, attr := range thisContent.Attributes { + if existing := content.Attributes[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + content.Attributes[name] = attr + } + } + + if len(thisContent.Blocks) != 0 { + content.Blocks = append(content.Blocks, thisContent.Blocks...) + } + } + + // Finally, we check for required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + + if content.Attributes[attrS.Name] == nil { + // We don't have any context here to produce a good diagnostic, + // which is why we warn in the Content docstring to minimize the + // use of required attributes on merged bodies. + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf( + "The argument %q is required, but was not set.", + attrS.Name, + ), + }) + } + } + + leftoverBody := MergeBodies(mergedLeftovers) + return content, leftoverBody, diags +} diff --git a/vendor/github.com/hashicorp/hcl/v2/ops.go b/vendor/github.com/hashicorp/hcl/v2/ops.go new file mode 100644 index 000000000..5d2910c13 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/ops.go @@ -0,0 +1,288 @@ +package hcl + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Index is a helper function that performs the same operation as the index +// operator in the HCL expression language. 
That is, the result is the +// same as it would be for collection[key] in a configuration expression. +// +// This is exported so that applications can perform indexing in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. +func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + // We have a more specialized error message for the situation of + // using a fractional number to index into a sequence, because + // that will tend to happen if the user is trying to use division + // to calculate an index and not realizing that HCL does float + // division rather than integer division. 
+ if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) { + if key.IsKnown() && !key.IsNull() { + bf := key.AsBigFloat() + if _, acc := bf.Int(nil); acc != big.Exact { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf("The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index (%g) has a fractional part.", bf), + Subject: srcRange, + }, + } + } + } + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} + +// GetAttr is a helper function that performs the same operation as the +// attribute access in the HCL expression language. That is, the result is the +// same as it would be for obj.attr in a configuration expression. +// +// This is exported so that applications can access attributes in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. 
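+//
+// A minimal usage sketch (illustrative only; obj and rng are assumed to be
+// values held by the calling application):
+//
+//	val, diags := GetAttr(obj, "name", &rng)
+//	if diags.HasErrors() {
+//		// handle or report the diagnostics
+//	}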
+func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) { + if obj.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to get attribute from null value", + Detail: "This value is null, so it does not have any attributes.", + Subject: srcRange, + }, + } + } + + ty := obj.Type() + switch { + case ty.IsObjectType(): + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName), + Subject: srcRange, + }, + } + } + + if !obj.IsKnown() { + return cty.UnknownVal(ty.AttributeType(attrName)), nil + } + + return obj.GetAttr(attrName), nil + case ty.IsMapType(): + if !obj.IsKnown() { + return cty.UnknownVal(ty.ElementType()), nil + } + + idx := cty.StringVal(attrName) + if obj.HasIndex(idx).False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Missing map element", + Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName), + Subject: srcRange, + }, + } + } + + return obj.Index(idx), nil + case ty == cty.DynamicPseudoType: + return cty.DynamicVal, nil + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: "This value does not have any attributes.", + Subject: srcRange, + }, + } + } + +} + +// ApplyPath is a helper function that applies a cty.Path to a value using the +// indexing and attribute access operations from HCL. +// +// This is similar to calling the path's own Apply method, but ApplyPath uses +// the more relaxed typing rules that apply to these operations in HCL, rather +// than cty's relatively-strict rules. ApplyPath is implemented in terms of +// Index and GetAttr, and so it has the same behavior for individual steps +// but will stop and return any errors returned by intermediate steps. +// +// Diagnostics are produced if the given path cannot be applied to the given +// value. Therefore a pointer to a source range must be provided to use in +// diagnostics, though nil can be provided if the calling application is going +// to ignore the subject of the returned diagnostics anyway. +func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) { + var diags Diagnostics + + for _, step := range path { + var stepDiags Diagnostics + switch ts := step.(type) { + case cty.IndexStep: + val, stepDiags = Index(val, ts.Key, srcRange) + case cty.GetAttrStep: + val, stepDiags = GetAttr(val, ts.Name, srcRange) + default: + // Should never happen because the above are all of the step types. + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Invalid path step", + Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step), + Subject: srcRange, + }) + return cty.DynamicVal, diags + } + + diags = append(diags, stepDiags...) + if stepDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + return val, diags +} diff --git a/vendor/github.com/hashicorp/hcl/v2/pos.go b/vendor/github.com/hashicorp/hcl/v2/pos.go new file mode 100644 index 000000000..06db8bfbd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/pos.go @@ -0,0 +1,275 @@ +package hcl + +import "fmt" + +// Pos represents a single position in a source file, by addressing the +// start byte of a unicode character encoded in UTF-8. 
+// +// Pos is generally used only in the context of a Range, which then defines +// which source file the position is within. +type Pos struct { + // Line is the source code line where this position points. Lines are + // counted starting at 1 and incremented for each newline character + // encountered. + Line int + + // Column is the source code column where this position points, in + // unicode characters, with counting starting at 1. + // + // Column counts characters as they appear visually, so for example a + // latin letter with a combining diacritic mark counts as one character. + // This is intended for rendering visual markers against source code in + // contexts where these diacritics would be rendered in a single character + // cell. Technically speaking, Column is counting grapheme clusters as + // used in unicode normalization. + Column int + + // Byte is the byte offset into the file where the indicated character + // begins. This is a zero-based offset to the first byte of the first + // UTF-8 codepoint sequence in the character, and thus gives a position + // that can be resolved _without_ awareness of Unicode characters. + Byte int +} + +// InitialPos is a suitable position to use to mark the start of a file. +var InitialPos = Pos{Byte: 0, Line: 1, Column: 1} + +// Range represents a span of characters between two positions in a source +// file. +// +// This struct is usually used by value in types that represent AST nodes, +// but by pointer in types that refer to the positions of other objects, +// such as in diagnostics. +type Range struct { + // Filename is the name of the file into which this range's positions + // point. + Filename string + + // Start and End represent the bounds of this range. Start is inclusive + // and End is exclusive. + Start, End Pos +} + +// RangeBetween returns a new range that spans from the beginning of the +// start range to the end of the end range. +// +// The result is meaningless if the two ranges do not belong to the same +// source file or if the end range appears before the start range. +func RangeBetween(start, end Range) Range { + return Range{ + Filename: start.Filename, + Start: start.Start, + End: end.End, + } +} + +// RangeOver returns a new range that covers both of the given ranges and +// possibly additional content between them if the two ranges do not overlap. +// +// If either range is empty then it is ignored. The result is empty if both +// given ranges are empty. +// +// The result is meaningless if the two ranges to not belong to the same +// source file. +func RangeOver(a, b Range) Range { + if a.Empty() { + return b + } + if b.Empty() { + return a + } + + var start, end Pos + if a.Start.Byte < b.Start.Byte { + start = a.Start + } else { + start = b.Start + } + if a.End.Byte > b.End.Byte { + end = a.End + } else { + end = b.End + } + return Range{ + Filename: a.Filename, + Start: start, + End: end, + } +} + +// ContainsPos returns true if and only if the given position is contained within +// the receiving range. +// +// In the unlikely case that the line/column information disagree with the byte +// offset information in the given position or receiving range, the byte +// offsets are given priority. +func (r Range) ContainsPos(pos Pos) bool { + return r.ContainsOffset(pos.Byte) +} + +// ContainsOffset returns true if and only if the given byte offset is within +// the receiving Range. 
+func (r Range) ContainsOffset(offset int) bool { + return offset >= r.Start.Byte && offset < r.End.Byte +} + +// Ptr returns a pointer to a copy of the receiver. This is a convenience when +// ranges in places where pointers are required, such as in Diagnostic, but +// the range in question is returned from a method. Go would otherwise not +// allow one to take the address of a function call. +func (r Range) Ptr() *Range { + return &r +} + +// String returns a compact string representation of the receiver. +// Callers should generally prefer to present a range more visually, +// e.g. via markers directly on the relevant portion of source code. +func (r Range) String() string { + if r.Start.Line == r.End.Line { + return fmt.Sprintf( + "%s:%d,%d-%d", + r.Filename, + r.Start.Line, r.Start.Column, + r.End.Column, + ) + } else { + return fmt.Sprintf( + "%s:%d,%d-%d,%d", + r.Filename, + r.Start.Line, r.Start.Column, + r.End.Line, r.End.Column, + ) + } +} + +func (r Range) Empty() bool { + return r.Start.Byte == r.End.Byte +} + +// CanSliceBytes returns true if SliceBytes could return an accurate +// sub-slice of the given slice. +// +// This effectively tests whether the start and end offsets of the range +// are within the bounds of the slice, and thus whether SliceBytes can be +// trusted to produce an accurate start and end position within that slice. +func (r Range) CanSliceBytes(b []byte) bool { + switch { + case r.Start.Byte < 0 || r.Start.Byte > len(b): + return false + case r.End.Byte < 0 || r.End.Byte > len(b): + return false + case r.End.Byte < r.Start.Byte: + return false + default: + return true + } +} + +// SliceBytes returns a sub-slice of the given slice that is covered by the +// receiving range, assuming that the given slice is the source code of the +// file indicated by r.Filename. +// +// If the receiver refers to any byte offsets that are outside of the slice +// then the result is constrained to the overlapping portion only, to avoid +// a panic. Use CanSliceBytes to determine if the result is guaranteed to +// be an accurate span of the requested range. +func (r Range) SliceBytes(b []byte) []byte { + start := r.Start.Byte + end := r.End.Byte + if start < 0 { + start = 0 + } else if start > len(b) { + start = len(b) + } + if end < 0 { + end = 0 + } else if end > len(b) { + end = len(b) + } + if end < start { + end = start + } + return b[start:end] +} + +// Overlaps returns true if the receiver and the other given range share any +// characters in common. +func (r Range) Overlaps(other Range) bool { + switch { + case r.Filename != other.Filename: + // If the ranges are in different files then they can't possibly overlap + return false + case r.Empty() || other.Empty(): + // Empty ranges can never overlap + return false + case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte): + return true + case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte): + return true + default: + return false + } +} + +// Overlap finds a range that is either identical to or a sub-range of both +// the receiver and the other given range. It returns an empty range +// within the receiver if there is no overlap between the two ranges. +// +// A non-empty result is either identical to or a subset of the receiver. 
+func (r Range) Overlap(other Range) Range { + if !r.Overlaps(other) { + // Start == End indicates an empty range + return Range{ + Filename: r.Filename, + Start: r.Start, + End: r.Start, + } + } + + var start, end Pos + if r.Start.Byte > other.Start.Byte { + start = r.Start + } else { + start = other.Start + } + if r.End.Byte < other.End.Byte { + end = r.End + } else { + end = other.End + } + + return Range{ + Filename: r.Filename, + Start: start, + End: end, + } +} + +// PartitionAround finds the portion of the given range that overlaps with +// the reciever and returns three ranges: the portion of the reciever that +// precedes the overlap, the overlap itself, and then the portion of the +// reciever that comes after the overlap. +// +// If the two ranges do not overlap then all three returned ranges are empty. +// +// If the given range aligns with or extends beyond either extent of the +// reciever then the corresponding outer range will be empty. +func (r Range) PartitionAround(other Range) (before, overlap, after Range) { + overlap = r.Overlap(other) + if overlap.Empty() { + return overlap, overlap, overlap + } + + before = Range{ + Filename: r.Filename, + Start: r.Start, + End: overlap.Start, + } + after = Range{ + Filename: r.Filename, + Start: overlap.End, + End: r.End, + } + + return before, overlap, after +} diff --git a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go new file mode 100644 index 000000000..17c0d7c6b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go @@ -0,0 +1,152 @@ +package hcl + +import ( + "bufio" + "bytes" + + "github.com/apparentlymart/go-textseg/textseg" +) + +// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc +// and visit a source range for each token matched. +// +// For example, this can be used with bufio.ScanLines to find the source range +// for each line in the file, skipping over the actual newline characters, which +// may be useful when printing source code snippets as part of diagnostic +// messages. +// +// The line and column information in the returned ranges is produced by +// counting newline characters and grapheme clusters respectively, which +// mimics the behavior we expect from a parser when producing ranges. +type RangeScanner struct { + filename string + b []byte + cb bufio.SplitFunc + + pos Pos // position of next byte to process in b + cur Range // latest range + tok []byte // slice of b that is covered by cur + err error // error from last scan, if any +} + +// NewRangeScanner creates a new RangeScanner for the given buffer, producing +// ranges for the given filename. +// +// Since ranges have grapheme-cluster granularity rather than byte granularity, +// the scanner will produce incorrect results if the given SplitFunc creates +// tokens between grapheme cluster boundaries. In particular, it is incorrect +// to use RangeScanner with bufio.ScanRunes because it will produce tokens +// around individual UTF-8 sequences, which will split any multi-sequence +// grapheme clusters. +func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner { + return NewRangeScannerFragment(b, filename, InitialPos, cb) +} + +// NewRangeScannerFragment is like NewRangeScanner but the ranges it produces +// will be offset by the given starting position, which is appropriate for +// sub-slices of a file, whereas NewRangeScanner assumes it is scanning an +// entire file. 
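+//
+// In either case, usage follows the bufio.Scanner pattern. A sketch that
+// scans whole lines (src is a []byte of source code and the filename is a
+// placeholder):
+//
+//     sc := hcl.NewRangeScanner(src, "example.hcl", bufio.ScanLines)
+//     for sc.Scan() {
+//         fmt.Printf("%s: %q\n", sc.Range(), sc.Bytes())
+//     }
+//     if err := sc.Err(); err != nil {
+//         // handle the error reported by the underlying SplitFunc
+//     }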
+func NewRangeScannerFragment(b []byte, filename string, start Pos, cb bufio.SplitFunc) *RangeScanner { + return &RangeScanner{ + filename: filename, + b: b, + cb: cb, + pos: start, + } +} + +func (sc *RangeScanner) Scan() bool { + if sc.pos.Byte >= len(sc.b) || sc.err != nil { + // All done + return false + } + + // Since we're operating on an in-memory buffer, we always pass the whole + // remainder of the buffer to our SplitFunc and set isEOF to let it know + // that it has the whole thing. + advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true) + + // Since we are setting isEOF to true this should never happen, but + // if it does we will just abort and assume the SplitFunc is misbehaving. + if advance == 0 && token == nil && err == nil { + return false + } + + if err != nil { + sc.err = err + sc.cur = Range{ + Filename: sc.filename, + Start: sc.pos, + End: sc.pos, + } + sc.tok = nil + return false + } + + sc.tok = token + start := sc.pos + end := sc.pos + new := sc.pos + + // adv is similar to token but it also includes any subsequent characters + // we're being asked to skip over by the SplitFunc. + // adv is a slice covering any additional bytes we are skipping over, based + // on what the SplitFunc told us to do with advance. + adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance] + + // We now need to scan over our token to count the grapheme clusters + // so we can correctly advance Column, and count the newlines so we + // can correctly advance Line. + advR := bytes.NewReader(adv) + gsc := bufio.NewScanner(advR) + advanced := 0 + gsc.Split(textseg.ScanGraphemeClusters) + for gsc.Scan() { + gr := gsc.Bytes() + new.Byte += len(gr) + new.Column++ + + // We rely here on the fact that \r\n is considered a grapheme cluster + // and so we don't need to worry about miscounting additional lines + // on files with Windows-style line endings. + if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') { + new.Column = 1 + new.Line++ + } + + if advanced < len(token) { + // If we've not yet found the end of our token then we'll + // also push our "end" marker along. + // (if advance > len(token) then we'll stop moving "end" early + // so that the caller only sees the range covered by token.) + end = new + } + advanced += len(gr) + } + + sc.cur = Range{ + Filename: sc.filename, + Start: start, + End: end, + } + sc.pos = new + return true +} + +// Range returns a range that covers the latest token obtained after a call +// to Scan returns true. +func (sc *RangeScanner) Range() Range { + return sc.cur +} + +// Bytes returns the slice of the input buffer that is covered by the range +// that would be returned by Range. +func (sc *RangeScanner) Bytes() []byte { + return sc.tok +} + +// Err can be called after Scan returns false to determine if the latest read +// resulted in an error, and obtain that error if so. +func (sc *RangeScanner) Err() error { + return sc.err +} diff --git a/vendor/github.com/hashicorp/hcl/v2/schema.go b/vendor/github.com/hashicorp/hcl/v2/schema.go new file mode 100644 index 000000000..891257acb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/schema.go @@ -0,0 +1,21 @@ +package hcl + +// BlockHeaderSchema represents the shape of a block header, and is +// used for matching blocks within bodies. +type BlockHeaderSchema struct { + Type string + LabelNames []string +} + +// AttributeSchema represents the requirements for an attribute, and is used +// for matching attributes within bodies. 
+type AttributeSchema struct { + Name string + Required bool +} + +// BodySchema represents the desired shallow structure of a body. +type BodySchema struct { + Attributes []AttributeSchema + Blocks []BlockHeaderSchema +} diff --git a/vendor/github.com/hashicorp/hcl/v2/spec.md b/vendor/github.com/hashicorp/hcl/v2/spec.md new file mode 100644 index 000000000..97ef61318 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/spec.md @@ -0,0 +1,691 @@ +# HCL Syntax-Agnostic Information Model + +This is the specification for the general information model (abstract types and +semantics) for hcl. HCL is a system for defining configuration languages for +applications. The HCL information model is designed to support multiple +concrete syntaxes for configuration, each with a mapping to the model defined +in this specification. + +The two primary syntaxes intended for use in conjunction with this model are +[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md). +In principle other syntaxes are possible as long as either their language model +is sufficiently rich to express the concepts described in this specification +or the language targets a well-defined subset of the specification. + +## Structural Elements + +The primary structural element is the _body_, which is a container representing +a set of zero or more _attributes_ and a set of zero or more _blocks_. + +A _configuration file_ is the top-level object, and will usually be produced +by reading a file from disk and parsing it as a particular syntax. A +configuration file has its own _body_, representing the top-level attributes +and blocks. + +An _attribute_ is a name and value pair associated with a body. Attribute names +are unique within a given body. Attribute values are provided as _expressions_, +which are discussed in detail in a later section. + +A _block_ is a nested structure that has a _type name_, zero or more string +_labels_ (e.g. identifiers), and a nested body. + +Together the structural elements create a hierarchical data structure, with +attributes intended to represent the direct properties of a particular object +in the calling application, and blocks intended to represent child objects +of a particular object. + +## Body Content + +To support the expression of the HCL concepts in languages whose information +model is a subset of HCL's, such as JSON, a _body_ is an opaque container +whose content can only be accessed by providing information on the expected +structure of the content. + +The specification for each syntax must describe how its physical constructs +are mapped on to body content given a schema. For syntaxes that have +first-class syntax distinguishing attributes and bodies this can be relatively +straightforward, while more detailed mapping rules may be required in syntaxes +where the representation of attributes vs. blocks is ambiguous. + +### Schema-driven Processing + +Schema-driven processing is the primary way to access body content. +A _body schema_ is a description of what is expected within a particular body, +which can then be used to extract the _body content_, which then provides +access to the specific attributes and blocks requested. + +A _body schema_ consists of a list of _attribute schemata_ and +_block header schemata_: + +- An _attribute schema_ provides the name of an attribute and whether its + presence is required. + +- A _block header schema_ provides a block type name and the semantic names + assigned to each of the labels of that block type, if any. 
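+
+As a non-normative illustration, the Go implementation vendored here
+(`package hcl`) expresses such a schema directly with the `BodySchema`,
+`AttributeSchema` and `BlockHeaderSchema` types shown above. The sketch below
+assumes the `hclparse` package from the same module and an invented
+configuration snippet:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/hashicorp/hcl/v2"
+    "github.com/hashicorp/hcl/v2/hclparse"
+)
+
+const src = `
+io_mode = "async"
+
+service "http" "web_proxy" {
+  listen_addr = "127.0.0.1:8080"
+}
+`
+
+func main() {
+    // Parse native-syntax source into an hcl.File whose Body we can inspect.
+    file, diags := hclparse.NewParser().ParseHCL([]byte(src), "example.hcl")
+    if diags.HasErrors() {
+        log.Fatal(diags)
+    }
+
+    // One optional attribute plus one block type that carries two labels.
+    schema := &hcl.BodySchema{
+        Attributes: []hcl.AttributeSchema{
+            {Name: "io_mode", Required: false},
+        },
+        Blocks: []hcl.BlockHeaderSchema{
+            {Type: "service", LabelNames: []string{"type", "name"}},
+        },
+    }
+
+    // Applying the schema yields body content: an attribute map and a
+    // block sequence.
+    content, diags := file.Body.Content(schema)
+    if diags.HasErrors() {
+        log.Fatal(diags)
+    }
+    for _, block := range content.Blocks {
+        fmt.Println(block.Type, block.Labels) // service [http web_proxy]
+    }
+}
+```
+
+Calling `PartialContent` with the same schema would additionally return a
+remainder body holding anything not named in the schema, as described under
+_Partial Processing of Body Content_ below.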
+ +Within a schema, it is an error to request the same attribute name twice or +to request a block type whose name is also an attribute name. While this can +in principle be supported in some syntaxes, in other syntaxes the attribute +and block namespaces are combined and so an attribute cannot coexist with +a block whose type name is identical to the attribute name. + +The result of applying a body schema to a body is _body content_, which +consists of an _attribute map_ and a _block sequence_: + +- The _attribute map_ is a map data structure whose keys are attribute names + and whose values are _expressions_ that represent the corresponding attribute + values. + +- The _block sequence_ is an ordered sequence of blocks, with each specifying + a block _type name_, the sequence of _labels_ specified for the block, + and the body object (not body _content_) representing the block's own body. + +After obtaining _body content_, the calling application may continue processing +by evaluating attribute expressions and/or recursively applying further +schema-driven processing to the child block bodies. + +**Note:** The _body schema_ is intentionally minimal, to reduce the set of +mapping rules that must be defined for each syntax. Higher-level utility +libraries may be provided to assist in the construction of a schema and +perform additional processing, such as automatically evaluating attribute +expressions and assigning their result values into a data structure, or +recursively applying a schema to child blocks. Such utilities are not part of +this core specification and will vary depending on the capabilities and idiom +of the implementation language. + +### _Dynamic Attributes_ Processing + +The _schema-driven_ processing model is useful when the expected structure +of a body is known a priori by the calling application. Some blocks are +instead more free-form, such as a user-provided set of arbitrary key/value +pairs. + +The alternative _dynamic attributes_ processing mode allows for this more +ad-hoc approach. Processing in this mode behaves as if a schema had been +constructed without any _block header schemata_ and with an attribute +schema for each distinct key provided within the physical representation +of the body. + +The means by which _distinct keys_ are identified is dependent on the +physical syntax; this processing mode assumes that the syntax has a way +to enumerate keys provided by the author and identify expressions that +correspond with those keys, but does not define the means by which this is +done. + +The result of _dynamic attributes_ processing is an _attribute map_ as +defined in the previous section. No _block sequence_ is produced in this +processing mode. + +### Partial Processing of Body Content + +Under _schema-driven processing_, by default the given schema is assumed +to be exhaustive, such that any attribute or block not matched by schema +elements is considered an error. This allows feedback about unsupported +attributes and blocks (such as typos) to be provided. + +An alternative is _partial processing_, where any additional elements within +the body are not considered an error. + +Under partial processing, the result is both body content as described +above _and_ a new body that represents any body elements that remain after +the schema has been processed. + +Specifically: + +- Any attribute whose name is specified in the schema is returned in body + content and elided from the new body. 
+ +- Any block whose type is specified in the schema is returned in body content + and elided from the new body. + +- Any attribute or block _not_ meeting the above conditions is placed into + the new body, unmodified. + +The new body can then be recursively processed using any of the body +processing models. This facility allows different subsets of body content +to be processed by different parts of the calling application. + +Processing a body in two steps — first partial processing of a source body, +then exhaustive processing of the returned body — is equivalent to single-step +processing with a schema that is the union of the schemata used +across the two steps. + +## Expressions + +Attribute values are represented by _expressions_. Depending on the concrete +syntax in use, an expression may just be a literal value or it may describe +a computation in terms of literal values, variables, and functions. + +Each syntax defines its own representation of expressions. For syntaxes based +in languages that do not have any non-literal expression syntax, it is +recommended to embed the template language from +[the native syntax](./hclsyntax/spec.md) e.g. as a post-processing step on +string literals. + +### Expression Evaluation + +In order to obtain a concrete value, each expression must be _evaluated_. +Evaluation is performed in terms of an evaluation context, which +consists of the following: + +- An _evaluation mode_, which is defined below. +- A _variable scope_, which provides a set of named variables for use in + expressions. +- A _function table_, which provides a set of named functions for use in + expressions. + +The _evaluation mode_ allows for two different interpretations of an +expression: + +- In _literal-only mode_, variables and functions are not available and it + is assumed that the calling application's intent is to treat the attribute + value as a literal. + +- In _full expression mode_, variables and functions are defined and it is + assumed that the calling application wishes to provide a full expression + language for definition of the attribute value. + +The actual behavior of these two modes depends on the syntax in use. For +languages with first-class expression syntax, these two modes may be considered +equivalent, with _literal-only mode_ simply not defining any variables or +functions. For languages that embed arbitrary expressions via string templates, +_literal-only mode_ may disable such processing, allowing literal strings to +pass through without interpretation as templates. + +Since literal-only mode does not support variables and functions, it is an +error for the calling application to enable this mode and yet provide a +variable scope and/or function table. + +## Values and Value Types + +The result of expression evaluation is a _value_. Each value has a _type_, +which is dynamically determined during evaluation. The _variable scope_ in +the evaluation context is a map from variable name to value, using the same +definition of value. + +The type system for HCL values is intended to be of a level abstraction +suitable for configuration of various applications. A well-defined, +implementation-language-agnostic type system is defined to allow for +consistent processing of configuration across many implementation languages. 
+Concrete implementations may provide additional functionality to lower
+HCL values and types to corresponding native language types, which may then
+impose additional constraints on the values outside of the scope of this
+specification.
+
+Two values are _equal_ if and only if they have identical types and their
+values are equal according to the rules of their shared type.
+
+### Primitive Types
+
+The primitive types are _string_, _bool_, and _number_.
+
+A _string_ is a sequence of unicode characters. Two strings are equal if
+NFC normalization ([UAX#15](http://unicode.org/reports/tr15/))
+of each string produces two identical sequences of characters.
+NFC normalization ensures that, for example, a precomposed combination of a
+latin letter and a diacritic compares equal with the letter followed by
+a combining diacritic.
+
+The _bool_ type has only two non-null values: _true_ and _false_. Two bool
+values are equal if and only if they are either both true or both false.
+
+A _number_ is an arbitrary-precision floating point value. An implementation
+_must_ make the full-precision values available to the calling application
+for interpretation into any suitable number representation. An implementation
+may in practice implement numbers with limited precision so long as the
+following constraints are met:
+
+- Integers are represented with at least 256 bits.
+- Non-integer numbers are represented as floating point values with a
+  mantissa of at least 256 bits and a signed binary exponent of at least
+  16 bits.
+- An error is produced if an integer value given in source cannot be
+  represented precisely.
+- An error is produced if a non-integer value cannot be represented due to
+  overflow.
+- A non-integer number is rounded to the nearest possible value when a
+  value is of too high a precision to be represented.
+
+The _number_ type also requires representation of both positive and negative
+infinity. A "not a number" (NaN) value is _not_ provided nor used.
+
+Two number values are equal if they are numerically equal to the precision
+associated with the number. Positive infinity and negative infinity are
+equal to themselves but not to each other. Positive infinity is greater than
+any other number value, and negative infinity is less than any other number
+value.
+
+Some syntaxes may be unable to represent numeric literals of arbitrary
+precision. This must be defined in the syntax specification as part of its
+description of mapping numeric literals to HCL values.
+
+### Structural Types
+
+_Structural types_ are types that are constructed by combining other types.
+Each distinct combination of other types is itself a distinct type. There
+are two structural type _kinds_:
+
+- _Object types_ are constructed of a set of named attributes, each of which
+  has a type. Attribute names are always strings. (_Object_ attributes are a
+  distinct idea from _body_ attributes, though calling applications
+  may choose to blur the distinction by use of common naming schemes.)
+- _Tuple types_ are constructed of a sequence of elements, each of which
+  has a type.
+
+Values of structural types are compared for equality in terms of their
+attributes or elements. A structural type value is equal to another if and
+only if all of the corresponding attributes or elements are equal.
+
+Two structural types are identical if they are of the same kind and
+have attributes or elements with identical types.
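+
+The sketch below is likewise non-normative. It uses the third-party `go-cty`
+library (`github.com/zclconf/go-cty/cty`, the value implementation used by the
+Go code vendored here) to demonstrate type identity and value equality for
+structural types:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+    // Two object types built from the same attribute names and types are
+    // the same (identical) type.
+    t1 := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.Number})
+    t2 := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.Number})
+    fmt.Println(t1.Equals(t2)) // true
+
+    // A tuple type is a different kind, so it is never identical to an
+    // object type, even when the element types coincide.
+    tup := cty.Tuple([]cty.Type{cty.String, cty.Number})
+    fmt.Println(tup.Equals(t1)) // false
+
+    // Values of structural types compare attribute-by-attribute.
+    v1 := cty.ObjectVal(map[string]cty.Value{
+        "name": cty.StringVal("web"),
+        "port": cty.NumberIntVal(8080),
+    })
+    v2 := cty.ObjectVal(map[string]cty.Value{
+        "name": cty.StringVal("web"),
+        "port": cty.NumberIntVal(8080),
+    })
+    fmt.Println(v1.Equals(v2).True()) // true
+}
+```
+
+The collection types described in the next section follow the same pattern in
+that library, via `cty.List`, `cty.Map`, and `cty.Set`.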
+ +### Collection Types + +_Collection types_ are types that combine together an arbitrary number of +values of some other single type. There are three collection type _kinds_: + +- _List types_ represent ordered sequences of values of their element type. +- _Map types_ represent values of their element type accessed via string keys. +- _Set types_ represent unordered sets of distinct values of their element type. + +For each of these kinds and each distinct element type there is a distinct +collection type. For example, "list of string" is a distinct type from +"set of string", and "list of number" is a distinct type from "list of string". + +Values of collection types are compared for equality in terms of their +elements. A collection type value is equal to another if and only if both +have the same number of elements and their corresponding elements are equal. + +Two collection types are identical if they are of the same kind and have +the same element type. + +### Null values + +Each type has a null value. The null value of a type represents the absence +of a value, but with type information retained to allow for type checking. + +Null values are used primarily to represent the conditional absence of a +body attribute. In a syntax with a conditional operator, one of the result +values of that conditional may be null to indicate that the attribute should be +considered not present in that case. + +Calling applications _should_ consider an attribute with a null value as +equivalent to the value not being present at all. + +A null value of a particular type is equal to itself. + +### Unknown Values and the Dynamic Pseudo-type + +An _unknown value_ is a placeholder for a value that is not yet known. +Operations on unknown values themselves return unknown values that have a +type appropriate to the operation. For example, adding together two unknown +numbers yields an unknown number, while comparing two unknown values of any +type for equality yields an unknown bool. + +Each type has a distinct unknown value. For example, an unknown _number_ is +a distinct value from an unknown _string_. + +_The dynamic pseudo-type_ is a placeholder for a type that is not yet known. +The only values of this type are its null value and its unknown value. It is +referred to as a _pseudo-type_ because it should not be considered a type in +its own right, but rather as a placeholder for a type yet to be established. +The unknown value of the dynamic pseudo-type is referred to as _the dynamic +value_. + +Operations on values of the dynamic pseudo-type behave as if it is a value +of the expected type, optimistically assuming that once the value and type +are known they will be valid for the operation. For example, adding together +a number and the dynamic value produces an unknown number. + +Unknown values and the dynamic pseudo-type can be used as a mechanism for +partial type checking and semantic checking: by evaluating an expression with +all variables set to an unknown value, the expression can be evaluated to +produce an unknown value of a given type, or produce an error if any operation +is provably invalid with only type information. + +Unknown values and the dynamic pseudo-type must never be returned from +operations unless at least one operand is unknown or dynamic. Calling +applications are guaranteed that unless the global scope includes unknown +values, or the function table includes functions that return unknown values, +no expression will evaluate to an unknown value. 
The calling application is
+thus in total control over the use and meaning of unknown values.
+
+The dynamic pseudo-type is identical only to itself.
+
+### Capsule Types
+
+A _capsule type_ is a custom type defined by the calling application. A value
+of a capsule type is considered opaque to HCL, but may be accepted
+by functions provided by the calling application.
+
+A particular capsule type is identical only to itself. The equality of two
+values of the same capsule type is defined by the calling application. No
+other operations are supported for values of capsule types.
+
+Support for capsule types in a HCL implementation is optional. Capsule types
+are intended to allow calling applications to pass through values that are
+not part of the standard type system. For example, an application that
+deals with raw binary data may define a capsule type representing a byte
+array, and provide functions that produce or operate on byte arrays.
+
+### Type Specifications
+
+In certain situations it is necessary to define expectations about the expected
+type of a value. Whereas two _types_ have a commutative _identity_ relationship,
+a type has a non-commutative _matches_ relationship with a _type specification_.
+A type specification is, in practice, just a different interpretation of a
+type such that:
+
+- Any type _matches_ any type that it is identical to.
+
+- Any type _matches_ the dynamic pseudo-type.
+
+For example, given a type specification "list of dynamic pseudo-type", the
+concrete types "list of string" and "list of map" match, but the
+type "set of string" does not.
+
+## Functions and Function Calls
+
+The evaluation context used to evaluate an expression includes a function
+table, which represents an application-defined set of named functions
+available for use in expressions.
+
+Each syntax defines whether function calls are supported and how they are
+physically represented in source code, but the semantics of function calls are
+defined here to ensure consistent results across syntaxes and to allow
+applications to provide functions that are interoperable with all syntaxes.
+
+A _function_ is defined from the following elements:
+
+- Zero or more _positional parameters_, each with a name used for documentation,
+  a type specification for expected argument values, and a flag for whether
+  each of null values, unknown values, and values of the dynamic pseudo-type
+  are accepted.
+
+- Zero or one _variadic parameter_, with the same structure as the _positional_
+  parameters, which if present collects any additional arguments provided at
+  the function call site.
+
+- A _result type definition_, which specifies the value type returned for each
+  valid sequence of argument values.
+
+- A _result value definition_, which specifies the value returned for each
+  valid sequence of argument values.
+
+A _function call_, regardless of source syntax, consists of a sequence of
+argument values. The argument values are each mapped to a corresponding
+parameter as follows:
+
+- For each of the function's positional parameters in sequence, take the next
+  argument. If there are no more arguments, the call is erroneous.
+
+- If the function has a variadic parameter, take all remaining arguments that
+  were not yet assigned to a positional parameter and collect them into
+  a sequence of variadic arguments that each correspond to the variadic
+  parameter.
+ +- If the function has _no_ variadic parameter, it is an error if any arguments + remain after taking one argument for each positional parameter. + +After mapping each argument to a parameter, semantic checking proceeds +for each argument: + +- If the argument value corresponding to a parameter does not match the + parameter's type specification, the call is erroneous. + +- If the argument value corresponding to a parameter is null and the parameter + is not specified as accepting nulls, the call is erroneous. + +- If the argument value corresponding to a parameter is the dynamic value + and the parameter is not specified as accepting values of the dynamic + pseudo-type, the call is valid but its _result type_ is forced to be the + dynamic pseudo type. + +- If neither of the above conditions holds for any argument, the call is + valid and the function's value type definition is used to determine the + call's _result type_. A function _may_ vary its result type depending on + the argument _values_ as well as the argument _types_; for example, a + function that decodes a JSON value will return a different result type + depending on the data structure described by the given JSON source code. + +If semantic checking succeeds without error, the call is _executed_: + +- For each argument, if its value is unknown and its corresponding parameter + is not specified as accepting unknowns, the _result value_ is forced to be an + unknown value of the result type. + +- If the previous condition does not apply, the function's result value + definition is used to determine the call's _result value_. + +The result of a function call expression is either an error, if one of the +erroneous conditions above applies, or the _result value_. + +## Type Conversions and Unification + +Values given in configuration may not always match the expectations of the +operations applied to them or to the calling application. In such situations, +automatic type conversion is attempted as a convenience to the user. + +Along with conversions to a _specified_ type, it is sometimes necessary to +ensure that a selection of values are all of the _same_ type, without any +constraint on which type that is. This is the process of _type unification_, +which attempts to find the most general type that all of the given types can +be converted to. + +Both type conversions and unification are defined in the syntax-agnostic +model to ensure consistency of behavior between syntaxes. + +Type conversions are broadly characterized into two categories: _safe_ and +_unsafe_. A conversion is "safe" if any distinct value of the source type +has a corresponding distinct value in the target type. A conversion is +"unsafe" if either the target type values are _not_ distinct (information +may be lost in conversion) or if some values of the source type do not have +any corresponding value in the target type. An unsafe conversion may result +in an error. + +A given type can always be converted to itself, which is a no-op. + +### Conversion of Null Values + +All null values are safely convertable to a null value of any other type, +regardless of other type-specific rules specified in the sections below. + +### Conversion to and from the Dynamic Pseudo-type + +Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds, +producing an unknown value of the target type. + +Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result +is the input value, verbatim. 
This is the only situation where the conversion
+result value is not of the given target type.
+
+### Primitive Type Conversions
+
+Bidirectional conversions are available between the string and number types,
+and between the string and boolean types.
+
+The bool value true corresponds to the string containing the characters "true",
+while the bool value false corresponds to the string containing the characters
+"false". Conversion from bool to string is safe, while the converse is
+unsafe. The strings "1" and "0" are alternative string representations
+of true and false respectively. It is an error to convert a string other than
+the four in this paragraph to type bool.
+
+A number value is converted to string by translating its integer portion
+into a sequence of decimal digits (`0` through `9`), and then if it has a
+non-zero fractional part, a period `.` followed by a sequence of decimal
+digits representing its fractional part. No exponent portion is included.
+The number is converted at its full precision. Conversion from number to
+string is safe.
+
+A string is converted to a number value by reversing the above mapping.
+No exponent portion is allowed. Conversion from string to number is unsafe.
+It is an error to convert a string that does not comply with the expected
+syntax to type number.
+
+No direct conversion is available between the bool and number types.
+
+### Collection and Structural Type Conversions
+
+Conversion from set types to list types is _safe_, as long as their
+element types are safely convertable. If the element types are _unsafely_
+convertable, then the collection conversion is also unsafe. Each set element
+becomes a corresponding list element, in an undefined order. Although no
+particular ordering is required, implementations _should_ produce list
+elements in a consistent order for a given input set, as a convenience
+to calling applications.
+
+Conversion from list types to set types is _unsafe_, as long as their element
+types are convertable. Each distinct list item becomes a distinct set item.
+If two list items are equal, one of the two is lost in the conversion.
+
+Conversion from tuple types to list types is permitted if all of the
+tuple element types are convertable to the target list element type.
+The safety of the conversion depends on the safety of each of the element
+conversions. Each element in turn is converted to the list element type,
+producing a list of identical length.
+
+Conversion from tuple types to set types is permitted, behaving as if the
+tuple type was first converted to a list of the same element type and then
+that list converted to the target set type.
+
+Conversion from object types to map types is permitted if all of the object
+attribute types are convertable to the target map element type. The safety
+of the conversion depends on the safety of each of the attribute conversions.
+Each attribute in turn is converted to the map element type, and map element
+keys are set to the name of each corresponding object attribute.
+
+Conversion from list and set types to tuple types is permitted, following
+the opposite steps as the converse conversions. Such conversions are _unsafe_.
+It is an error to convert a list or set to a tuple type whose number of
+elements does not match the list or set length.
+
+Conversion from map types to object types is permitted if each map key
+corresponds to an attribute in the target object type.
It is an error to +convert from a map value whose set of keys does not exactly match the target +type's attributes. The conversion takes the opposite steps of the converse +conversion. + +Conversion from one object type to another is permitted as long as the +common attribute names have convertable types. Any attribute present in the +target type but not in the source type is populated with a null value of +the appropriate type. + +Conversion from one tuple type to another is permitted as long as the +tuples have the same length and the elements have convertable types. + +### Type Unification + +Type unification is an operation that takes a list of types and attempts +to find a single type to which they can all be converted. Since some +type pairs have bidirectional conversions, preference is given to _safe_ +conversions. In technical terms, all possible types are arranged into +a lattice, from which a most general supertype is selected where possible. + +The type resulting from type unification may be one of the input types, or +it may be an entirely new type produced by combination of two or more +input types. + +The following rules do not guarantee a valid result. In addition to these +rules, unification fails if any of the given types are not convertable +(per the above rules) to the selected result type. + +The following unification rules apply transitively. That is, if a rule is +defined from A to B, and one from B to C, then A can unify to C. + +Number and bool types both unify with string by preferring string. + +Two collection types of the same kind unify according to the unification +of their element types. + +List and set types unify by preferring the list type. + +Map and object types unify by preferring the object type. + +List, set and tuple types unify by preferring the tuple type. + +The dynamic pseudo-type unifies with any other type by selecting that other +type. The dynamic pseudo-type is the result type only if _all_ input types +are the dynamic pseudo-type. + +Two object types unify by constructing a new type whose attributes are +the union of those of the two input types. Any common attributes themselves +have their types unified. + +Two tuple types of the same length unify constructing a new type of the +same length whose elements are the unification of the corresponding elements +in the two input types. + +## Static Analysis + +In most applications, full expression evaluation is sufficient for understanding +the provided configuration. However, some specialized applications require more +direct access to the physical structures in the expressions, which can for +example allow the construction of new language constructs in terms of the +existing syntax elements. + +Since static analysis analyses the physical structure of configuration, the +details will vary depending on syntax. Each syntax must decide which of its +physical structures corresponds to the following analyses, producing error +diagnostics if they are applied to inappropriate expressions. + +The following are the required static analysis functions: + +- **Static List**: Require list/tuple construction syntax to be used and + return a list of expressions for each of the elements given. + +- **Static Map**: Require map/object construction syntax to be used and + return a list of key/value pairs -- both expressions -- for each of + the elements given. 
The usual constraint that a map key must be a string + must not apply to this analysis, thus allowing applications to interpret + arbitrary keys as they see fit. + +- **Static Call**: Require function call syntax to be used and return an + object describing the called function name and a list of expressions + representing each of the call arguments. + +- **Static Traversal**: Require a reference to a symbol in the variable + scope and return a description of the path from the root scope to the + accessed attribute or index. + +The intent of a calling application using these features is to require a more +rigid interpretation of the configuration than in expression evaluation. +Syntax implementations should make use of the extra contextual information +provided in order to make an intuitive mapping onto the constructs of the +underlying syntax, possibly interpreting the expression slightly differently +than it would be interpreted in normal evaluation. + +Each syntax must define which of its expression elements each of the analyses +above applies to, and how those analyses behave given those expression elements. + +## Implementation Considerations + +Implementations of this specification are free to adopt any strategy that +produces behavior consistent with the specification. This non-normative +section describes some possible implementation strategies that are consistent +with the goals of this specification. + +### Language-agnosticism + +The language-agnosticism of this specification assumes that certain behaviors +are implemented separately for each syntax: + +- Matching of a body schema with the physical elements of a body in the + source language, to determine correspondence between physical constructs + and schema elements. + +- Implementing the _dynamic attributes_ body processing mode by either + interpreting all physical constructs as attributes or producing an error + if non-attribute constructs are present. + +- Providing an evaluation function for all possible expressions that produces + a value given an evaluation context. + +- Providing the static analysis functionality described above in a manner that + makes sense within the convention of the syntax. + +The suggested implementation strategy is to use an implementation language's +closest concept to an _abstract type_, _virtual type_ or _interface type_ +to represent both Body and Expression. Each language-specific implementation +can then provide an implementation of each of these types wrapping AST nodes +or other physical constructs from the language parser. diff --git a/vendor/github.com/hashicorp/hcl/v2/static_expr.go b/vendor/github.com/hashicorp/hcl/v2/static_expr.go new file mode 100644 index 000000000..98ada87b6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/static_expr.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +type staticExpr struct { + val cty.Value + rng Range +} + +// StaticExpr returns an Expression that always evaluates to the given value. +// +// This is useful to substitute default values for expressions that are +// not explicitly given in configuration and thus would otherwise have no +// Expression to return. +// +// Since expressions are expected to have a source range, the caller must +// provide one. Ideally this should be a real source range, but it can +// be a synthetic one (with an empty-string filename) if no suitable range +// is available. 
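+//
+// For example (a sketch using a synthetic range, as described above):
+//
+//     rng := hcl.Range{Filename: "", Start: hcl.InitialPos, End: hcl.InitialPos}
+//     expr := hcl.StaticExpr(cty.StringVal("default"), rng)
+//     val, _ := expr.Value(nil) // always cty.StringVal("default"), no diagnostics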
+func StaticExpr(val cty.Value, rng Range) Expression { + return staticExpr{val, rng} +} + +func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) { + return e.val, nil +} + +func (e staticExpr) Variables() []Traversal { + return nil +} + +func (e staticExpr) Range() Range { + return e.rng +} + +func (e staticExpr) StartRange() Range { + return e.rng +} diff --git a/vendor/github.com/hashicorp/hcl/v2/structure.go b/vendor/github.com/hashicorp/hcl/v2/structure.go new file mode 100644 index 000000000..aab09457d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/structure.go @@ -0,0 +1,151 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +// File is the top-level node that results from parsing a HCL file. +type File struct { + Body Body + Bytes []byte + + // Nav is used to integrate with the "hcled" editor integration package, + // and with diagnostic information formatters. It is not for direct use + // by a calling application. + Nav interface{} +} + +// Block represents a nested block within a Body. +type Block struct { + Type string + Labels []string + Body Body + + DefRange Range // Range that can be considered the "definition" for seeking in an editor + TypeRange Range // Range for the block type declaration specifically. + LabelRanges []Range // Ranges for the label values specifically. +} + +// Blocks is a sequence of Block. +type Blocks []*Block + +// Attributes is a set of attributes keyed by their names. +type Attributes map[string]*Attribute + +// Body is a container for attributes and blocks. It serves as the primary +// unit of hierarchical structure within configuration. +// +// The content of a body cannot be meaningfully interpreted without a schema, +// so Body represents the raw body content and has methods that allow the +// content to be extracted in terms of a given schema. +type Body interface { + // Content verifies that the entire body content conforms to the given + // schema and then returns it, and/or returns diagnostics. The returned + // body content is valid if non-nil, regardless of whether Diagnostics + // are provided, but diagnostics should still be eventually shown to + // the user. + Content(schema *BodySchema) (*BodyContent, Diagnostics) + + // PartialContent is like Content except that it permits the configuration + // to contain additional blocks or attributes not specified in the + // schema. If any are present, the returned Body is non-nil and contains + // the remaining items from the body that were not selected by the schema. + PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) + + // JustAttributes attempts to interpret all of the contents of the body + // as attributes, allowing for the contents to be accessed without a priori + // knowledge of the structure. + // + // The behavior of this method depends on the body's source language. + // Some languages, like JSON, can't distinguish between attributes and + // blocks without schema hints, but for languages that _can_ error + // diagnostics will be generated if any blocks are present in the body. + // + // Diagnostics may be produced for other reasons too, such as duplicate + // declarations of the same attribute. + JustAttributes() (Attributes, Diagnostics) + + // MissingItemRange returns a range that represents where a missing item + // might hypothetically be inserted. This is used when producing + // diagnostics about missing required attributes or blocks. 
Not all bodies + // will have an obvious single insertion point, so the result here may + // be rather arbitrary. + MissingItemRange() Range +} + +// BodyContent is the result of applying a BodySchema to a Body. +type BodyContent struct { + Attributes Attributes + Blocks Blocks + + MissingItemRange Range +} + +// Attribute represents an attribute from within a body. +type Attribute struct { + Name string + Expr Expression + + Range Range + NameRange Range +} + +// Expression is a literal value or an expression provided in the +// configuration, which can be evaluated within a scope to produce a value. +type Expression interface { + // Value returns the value resulting from evaluating the expression + // in the given evaluation context. + // + // The context may be nil, in which case the expression may contain + // only constants and diagnostics will be produced for any non-constant + // sub-expressions. (The exact definition of this depends on the source + // language.) + // + // The context may instead be set but have either its Variables or + // Functions maps set to nil, in which case only use of these features + // will return diagnostics. + // + // Different diagnostics are provided depending on whether the given + // context maps are nil or empty. In the former case, the message + // tells the user that variables/functions are not permitted at all, + // while in the latter case usage will produce a "not found" error for + // the specific symbol in question. + Value(ctx *EvalContext) (cty.Value, Diagnostics) + + // Variables returns a list of variables referenced in the receiving + // expression. These are expressed as absolute Traversals, so may include + // additional information about how the variable is used, such as + // attribute lookups, which the calling application can potentially use + // to only selectively populate the scope. + Variables() []Traversal + + Range() Range + StartRange() Range +} + +// OfType filters the receiving block sequence by block type name, +// returning a new block sequence including only the blocks of the +// requested type. +func (els Blocks) OfType(typeName string) Blocks { + ret := make(Blocks, 0) + for _, el := range els { + if el.Type == typeName { + ret = append(ret, el) + } + } + return ret +} + +// ByType transforms the receiving block sequence into a map from type +// name to block sequences of only that type. +func (els Blocks) ByType() map[string]Blocks { + ret := make(map[string]Blocks) + for _, el := range els { + ty := el.Type + if ret[ty] == nil { + ret[ty] = make(Blocks, 0, 1) + } + ret[ty] = append(ret[ty], el) + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go new file mode 100644 index 000000000..8521814e5 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go @@ -0,0 +1,117 @@ +package hcl + +// ----------------------------------------------------------------------------- +// The methods in this file all have the general pattern of making a best-effort +// to find one or more constructs that contain a given source position. +// +// These all operate by delegating to an optional method of the same name and +// signature on the file's root body, allowing each syntax to potentially +// provide its own implementations of these. For syntaxes that don't implement +// them, the result is always nil. 
+// ----------------------------------------------------------------------------- + +// BlocksAtPos attempts to find all of the blocks that contain the given +// position, ordered so that the outermost block is first and the innermost +// block is last. This is a best-effort method that may not be able to produce +// a complete result for all positions or for all HCL syntaxes. +// +// If the returned slice is non-empty, the first element is guaranteed to +// represent the same block as would be the result of OutermostBlockAtPos and +// the last element the result of InnermostBlockAtPos. However, the +// implementation may return two different objects describing the same block, +// so comparison by pointer identity is not possible. +// +// The result is nil if no blocks at all contain the given position. +func (f *File) BlocksAtPos(pos Pos) []*Block { + // The root body of the file must implement this interface in order + // to support BlocksAtPos. + type Interface interface { + BlocksAtPos(pos Pos) []*Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.BlocksAtPos(pos) +} + +// OutermostBlockAtPos attempts to find a top-level block in the receiving file +// that contains the given position. This is a best-effort method that may not +// be able to produce a result for all positions or for all HCL syntaxes. +// +// The result is nil if no single block could be selected for any reason. +func (f *File) OutermostBlockAtPos(pos Pos) *Block { + // The root body of the file must implement this interface in order + // to support OutermostBlockAtPos. + type Interface interface { + OutermostBlockAtPos(pos Pos) *Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.OutermostBlockAtPos(pos) +} + +// InnermostBlockAtPos attempts to find the most deeply-nested block in the +// receiving file that contains the given position. This is a best-effort +// method that may not be able to produce a result for all positions or for +// all HCL syntaxes. +// +// The result is nil if no single block could be selected for any reason. +func (f *File) InnermostBlockAtPos(pos Pos) *Block { + // The root body of the file must implement this interface in order + // to support InnermostBlockAtPos. + type Interface interface { + InnermostBlockAtPos(pos Pos) *Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.InnermostBlockAtPos(pos) +} + +// OutermostExprAtPos attempts to find an expression in the receiving file +// that contains the given position. This is a best-effort method that may not +// be able to produce a result for all positions or for all HCL syntaxes. +// +// Since expressions are often nested inside one another, this method returns +// the outermost "root" expression that is not contained by any other. +// +// The result is nil if no single expression could be selected for any reason. +func (f *File) OutermostExprAtPos(pos Pos) Expression { + // The root body of the file must implement this interface in order + // to support OutermostExprAtPos. + type Interface interface { + OutermostExprAtPos(pos Pos) Expression + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.OutermostExprAtPos(pos) +} + +// AttributeAtPos attempts to find an attribute definition in the receiving +// file that contains the given position. This is a best-effort method that may +// not be able to produce a result for all positions or for all HCL syntaxes. 
+// +// The result is nil if no single attribute could be selected for any reason. +func (f *File) AttributeAtPos(pos Pos) *Attribute { + // The root body of the file must implement this interface in order + // to support OutermostExprAtPos. + type Interface interface { + AttributeAtPos(pos Pos) *Attribute + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.AttributeAtPos(pos) +} diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal.go b/vendor/github.com/hashicorp/hcl/v2/traversal.go new file mode 100644 index 000000000..d71019700 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/traversal.go @@ -0,0 +1,293 @@ +package hcl + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// A Traversal is a description of traversing through a value through a +// series of operations such as attribute lookup, index lookup, etc. +// +// It is used to look up values in scopes, for example. +// +// The traversal operations are implementations of interface Traverser. +// This is a closed set of implementations, so the interface cannot be +// implemented from outside this package. +// +// A traversal can be absolute (its first value is a symbol name) or relative +// (starts from an existing value). +type Traversal []Traverser + +// TraversalJoin appends a relative traversal to an absolute traversal to +// produce a new absolute traversal. +func TraversalJoin(abs Traversal, rel Traversal) Traversal { + if abs.IsRelative() { + panic("first argument to TraversalJoin must be absolute") + } + if !rel.IsRelative() { + panic("second argument to TraversalJoin must be relative") + } + + ret := make(Traversal, len(abs)+len(rel)) + copy(ret, abs) + copy(ret[len(abs):], rel) + return ret +} + +// TraverseRel applies the receiving traversal to the given value, returning +// the resulting value. This is supported only for relative traversals, +// and will panic if applied to an absolute traversal. +func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + if !t.IsRelative() { + panic("can't use TraverseRel on an absolute traversal") + } + + current := val + var diags Diagnostics + for _, tr := range t { + var newDiags Diagnostics + current, newDiags = tr.TraversalStep(current) + diags = append(diags, newDiags...) + if newDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + return current, diags +} + +// TraverseAbs applies the receiving traversal to the given eval context, +// returning the resulting value. This is supported only for absolute +// traversals, and will panic if applied to a relative traversal. 
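+//
+// For example (a sketch; the variable name and value are placeholders):
+//
+//     ctx := &hcl.EvalContext{
+//         Variables: map[string]cty.Value{
+//             "name": cty.StringVal("example"),
+//         },
+//     }
+//     traversal := hcl.Traversal{hcl.TraverseRoot{Name: "name"}}
+//     val, diags := traversal.TraverseAbs(ctx) // cty.StringVal("example"), no error diagnostics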
+func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { + if t.IsRelative() { + panic("can't use TraverseAbs on a relative traversal") + } + + split := t.SimpleSplit() + root := split.Abs[0].(TraverseRoot) + name := root.Name + + thisCtx := ctx + hasNonNil := false + for thisCtx != nil { + if thisCtx.Variables == nil { + thisCtx = thisCtx.parent + continue + } + hasNonNil = true + val, exists := thisCtx.Variables[name] + if exists { + return split.Rel.TraverseRel(val) + } + thisCtx = thisCtx.parent + } + + if !hasNonNil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Variables not allowed", + Detail: "Variables may not be used here.", + Subject: &root.SrcRange, + }, + } + } + + suggestions := make([]string, 0, len(ctx.Variables)) + thisCtx = ctx + for thisCtx != nil { + for k := range thisCtx.Variables { + suggestions = append(suggestions, k) + } + thisCtx = thisCtx.parent + } + suggestion := nameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unknown variable", + Detail: fmt.Sprintf("There is no variable named %q.%s", name, suggestion), + Subject: &root.SrcRange, + }, + } +} + +// IsRelative returns true if the receiver is a relative traversal, or false +// otherwise. +func (t Traversal) IsRelative() bool { + if len(t) == 0 { + return true + } + if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot { + return false + } + return true +} + +// SimpleSplit returns a TraversalSplit where the name lookup is the absolute +// part and the remainder is the relative part. Supported only for +// absolute traversals, and will panic if applied to a relative traversal. +// +// This can be used by applications that have a relatively-simple variable +// namespace where only the top-level is directly populated in the scope, with +// everything else handled by relative lookups from those initial values. +func (t Traversal) SimpleSplit() TraversalSplit { + if t.IsRelative() { + panic("can't use SimpleSplit on a relative traversal") + } + return TraversalSplit{ + Abs: t[0:1], + Rel: t[1:], + } +} + +// RootName returns the root name for a absolute traversal. Will panic if +// called on a relative traversal. +func (t Traversal) RootName() string { + if t.IsRelative() { + panic("can't use RootName on a relative traversal") + + } + return t[0].(TraverseRoot).Name +} + +// SourceRange returns the source range for the traversal. +func (t Traversal) SourceRange() Range { + if len(t) == 0 { + // Nothing useful to return here, but we'll return something + // that's correctly-typed at least. + return Range{} + } + + return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange()) +} + +// TraversalSplit represents a pair of traversals, the first of which is +// an absolute traversal and the second of which is relative to the first. +// +// This is used by calling applications that only populate prefixes of the +// traversals in the scope, with Abs representing the part coming from the +// scope and Rel representing the remaining steps once that part is +// retrieved. +type TraversalSplit struct { + Abs Traversal + Rel Traversal +} + +// TraverseAbs traverses from a scope to the value resulting from the +// absolute traversal. 
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { + return t.Abs.TraverseAbs(ctx) +} + +// TraverseRel traverses from a given value, assumed to be the result of +// TraverseAbs on some scope, to a final result for the entire split traversal. +func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + return t.Rel.TraverseRel(val) +} + +// Traverse is a convenience function to apply TraverseAbs followed by +// TraverseRel. +func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) { + v1, diags := t.TraverseAbs(ctx) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + v2, newDiags := t.TraverseRel(v1) + diags = append(diags, newDiags...) + return v2, diags +} + +// Join concatenates together the Abs and Rel parts to produce a single +// absolute traversal. +func (t TraversalSplit) Join() Traversal { + return TraversalJoin(t.Abs, t.Rel) +} + +// RootName returns the root name for the absolute part of the split. +func (t TraversalSplit) RootName() string { + return t.Abs.RootName() +} + +// A Traverser is a step within a Traversal. +type Traverser interface { + TraversalStep(cty.Value) (cty.Value, Diagnostics) + SourceRange() Range + isTraverserSigil() isTraverser +} + +// Embed this in a struct to declare it as a Traverser +type isTraverser struct { +} + +func (tr isTraverser) isTraverserSigil() isTraverser { + return isTraverser{} +} + +// TraverseRoot looks up a root name in a scope. It is used as the first step +// of an absolute Traversal, and cannot itself be traversed directly. +type TraverseRoot struct { + isTraverser + Name string + SrcRange Range +} + +// TraversalStep on a TraverseName immediately panics, because absolute +// traversals cannot be directly traversed. +func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) { + panic("Cannot traverse an absolute traversal") +} + +func (tn TraverseRoot) SourceRange() Range { + return tn.SrcRange +} + +// TraverseAttr looks up an attribute in its initial value. +type TraverseAttr struct { + isTraverser + Name string + SrcRange Range +} + +func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { + return GetAttr(val, tn.Name, &tn.SrcRange) +} + +func (tn TraverseAttr) SourceRange() Range { + return tn.SrcRange +} + +// TraverseIndex applies the index operation to its initial value. +type TraverseIndex struct { + isTraverser + Key cty.Value + SrcRange Range +} + +func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { + return Index(val, tn.Key, &tn.SrcRange) +} + +func (tn TraverseIndex) SourceRange() Range { + return tn.SrcRange +} + +// TraverseSplat applies the splat operation to its initial value. +type TraverseSplat struct { + isTraverser + Each Traversal + SrcRange Range +} + +func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { + panic("TraverseSplat not yet implemented") +} + +func (tn TraverseSplat) SourceRange() Range { + return tn.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go new file mode 100644 index 000000000..f69d5fe9b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go @@ -0,0 +1,124 @@ +package hcl + +// AbsTraversalForExpr attempts to interpret the given expression as +// an absolute traversal, or returns error diagnostic(s) if that is +// not possible for the given expression. 
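The traversal types defined above (TraverseRoot, TraverseAttr, TraverseAbs and friends) are easiest to see with a concrete lookup. The sketch below is a minimal illustration, not part of the vendored upstream source: it builds an absolute traversal equivalent to the reference `person.name` and evaluates it against an EvalContext. The variable name and value are made up for the example.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Absolute traversal equivalent to `person.name`: a root name lookup
	// followed by a single attribute step.
	trav := hcl.Traversal{
		hcl.TraverseRoot{Name: "person"},
		hcl.TraverseAttr{Name: "name"},
	}

	// The scope that TraverseAbs walks. Only top-level names live here;
	// the remaining steps are applied relatively to the looked-up value.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"person": cty.ObjectVal(map[string]cty.Value{
				"name": cty.StringVal("Ada"),
			}),
		},
	}

	val, diags := trav.TraverseAbs(ctx)
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}
	fmt.Println(val.AsString()) // Ada
}
```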
+// +// A particular Expression implementation can support this function by +// offering a method called AsTraversal that takes no arguments and +// returns either a valid absolute traversal or nil to indicate that +// no traversal is possible. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +// +// In most cases the calling application is interested in the value +// that results from an expression, but in rarer cases the application +// needs to see the the name of the variable and subsequent +// attributes/indexes itself, for example to allow users to give references +// to the variables themselves rather than to their values. An implementer +// of this function should at least support attribute and index steps. +func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) { + type asTraversal interface { + AsTraversal() Traversal + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(asTraversal) + return supported + }) + + if asT, supported := physExpr.(asTraversal); supported { + if traversal := asT.AsTraversal(); traversal != nil { + return traversal, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.", + Subject: expr.Range().Ptr(), + }, + } +} + +// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns +// a relative traversal instead. Due to the nature of HCL expressions, the +// first element of the returned traversal is always a TraverseAttr, and +// then it will be followed by zero or more other expressions. +// +// Any expression accepted by AbsTraversalForExpr is also accepted by +// RelTraversalForExpr. +func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) { + traversal, diags := AbsTraversalForExpr(expr) + if len(traversal) > 0 { + ret := make(Traversal, len(traversal)) + copy(ret, traversal) + root := traversal[0].(TraverseRoot) + ret[0] = TraverseAttr{ + Name: root.Name, + SrcRange: root.SrcRange, + } + return ret, diags + } + return traversal, diags +} + +// ExprAsKeyword attempts to interpret the given expression as a static keyword, +// returning the keyword string if possible, and the empty string if not. +// +// A static keyword, for the sake of this function, is a single identifier. +// For example, the following attribute has an expression that would produce +// the keyword "foo": +// +// example = foo +// +// This function is a variant of AbsTraversalForExpr, which uses the same +// interface on the given expression. This helper constrains the result +// further by requiring only a single root identifier. +// +// This function is intended to be used with the following idiom, to recognize +// situations where one of a fixed set of keywords is required and arbitrary +// expressions are not allowed: +// +// switch hcl.ExprAsKeyword(expr) { +// case "allow": +// // (take suitable action for keyword "allow") +// case "deny": +// // (take suitable action for keyword "deny") +// default: +// diags = append(diags, &hcl.Diagnostic{ +// // ... "invalid keyword" diagnostic message ... 
+// }) +// } +// +// The above approach will generate the same message for both the use of an +// unrecognized keyword and for not using a keyword at all, which is usually +// reasonable if the message specifies that the given value must be a keyword +// from that fixed list. +// +// Note that in the native syntax the keywords "true", "false", and "null" are +// recognized as literal values during parsing and so these reserved words +// cannot not be accepted as keywords by this function. +// +// Since interpreting an expression as a keyword bypasses usual expression +// evaluation, it should be used sparingly for situations where e.g. one of +// a fixed set of keywords is used in a structural way in a special attribute +// to affect the further processing of a block. +func ExprAsKeyword(expr Expression) string { + type asTraversal interface { + AsTraversal() Traversal + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(asTraversal) + return supported + }) + + if asT, supported := physExpr.(asTraversal); supported { + if traversal := asT.AsTraversal(); len(traversal) == 1 { + return traversal.RootName() + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/logutils/.gitignore b/vendor/github.com/hashicorp/logutils/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md new file mode 100644 index 000000000..49490eaeb --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/README.md @@ -0,0 +1,36 @@ +# logutils + +logutils is a Go package that augments the standard library "log" package +to make logging a bit more modern, without fragmenting the Go ecosystem +with new logging packages. + +## The simplest thing that could possibly work + +Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following: + +```go +package main + +import ( + "log" + "os" + + "github.com/hashicorp/logutils" +) + +func main() { + filter := &logutils.LevelFilter{ + Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: logutils.LogLevel("WARN"), + Writer: os.Stderr, + } + log.SetOutput(filter) + + log.Print("[DEBUG] Debugging") // this will not print + log.Print("[WARN] Warning") // this will + log.Print("[ERROR] Erring") // and so will this + log.Print("Message I haven't updated") // and so will this +} +``` + +This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before. diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod new file mode 100644 index 000000000..ba38a4576 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/logutils diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go new file mode 100644 index 000000000..6381bf162 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/level.go @@ -0,0 +1,81 @@ +// Package logutils augments the standard log package with levels. +package logutils + +import ( + "bytes" + "io" + "sync" +) + +type LogLevel string + +// LevelFilter is an io.Writer that can be used with a logger that +// will filter out log messages that aren't at least a certain level. +// +// Once the filter is in use somewhere, it is not safe to modify +// the structure. +type LevelFilter struct { + // Levels is the list of log levels, in increasing order of + // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. + Levels []LogLevel + + // MinLevel is the minimum level allowed through + MinLevel LogLevel + + // The underlying io.Writer where log messages that pass the filter + // will be set. + Writer io.Writer + + badLevels map[LogLevel]struct{} + once sync.Once +} + +// Check will check a given line if it would be included in the level +// filter. +func (f *LevelFilter) Check(line []byte) bool { + f.once.Do(f.init) + + // Check for a log level + var level LogLevel + x := bytes.IndexByte(line, '[') + if x >= 0 { + y := bytes.IndexByte(line[x:], ']') + if y >= 0 { + level = LogLevel(line[x+1 : x+y]) + } + } + + _, ok := f.badLevels[level] + return !ok +} + +func (f *LevelFilter) Write(p []byte) (n int, err error) { + // Note in general that io.Writer can receive any byte sequence + // to write, but the "log" package always guarantees that we only + // get a single line. 
We use that as a slight optimization within + // this method, assuming we're dealing with a single, complete line + // of log data. + + if !f.Check(p) { + return len(p), nil + } + + return f.Writer.Write(p) +} + +// SetMinLevel is used to update the minimum log level +func (f *LevelFilter) SetMinLevel(min LogLevel) { + f.MinLevel = min + f.init() +} + +func (f *LevelFilter) init() { + badLevels := make(map[LogLevel]struct{}) + for _, level := range f.Levels { + if level == f.MinLevel { + break + } + badLevels[level] = struct{}{} + } + f.badLevels = badLevels +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go new file mode 100644 index 000000000..d9d276258 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go @@ -0,0 +1,138 @@ +package tfconfig + +import ( + "fmt" + + legacyhclparser "github.com/hashicorp/hcl/hcl/parser" + "github.com/hashicorp/hcl/v2" +) + +// Diagnostic describes a problem (error or warning) encountered during +// configuration loading. +type Diagnostic struct { + Severity DiagSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + + // Pos is not populated for all diagnostics, but when populated should + // indicate a particular line that the described problem relates to. + Pos *SourcePos `json:"pos,omitempty"` +} + +// Diagnostics represents a sequence of diagnostics. This is the type that +// should be returned from a function that might generate diagnostics. 
+type Diagnostics []Diagnostic + +// HasErrors returns true if there is at least one Diagnostic of severity +// DiagError in the receiever. +// +// If a function returns a Diagnostics without errors then the result can +// be assumed to be complete within the "best effort" constraints of this +// library. If errors are present then the caller may wish to employ more +// caution in relying on the result. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity == DiagError { + return true + } + } + return false +} + +func (diags Diagnostics) Error() string { + switch len(diags) { + case 0: + return "no problems" + case 1: + return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail) + default: + return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1) + } +} + +// Err returns an error representing the receiver if the receiver HasErrors, or +// nil otherwise. +// +// The returned error can be type-asserted back to a Diagnostics if needed. +func (diags Diagnostics) Err() error { + if diags.HasErrors() { + return diags + } + return nil +} + +// DiagSeverity describes the severity of a Diagnostic. +type DiagSeverity rune + +// DiagError indicates a problem that prevented proper processing of the +// configuration. In the precense of DiagError diagnostics the result is +// likely to be incomplete. +const DiagError DiagSeverity = 'E' + +// DiagWarning indicates a problem that the user may wish to consider but +// that did not prevent proper processing of the configuration. +const DiagWarning DiagSeverity = 'W' + +// MarshalJSON is an implementation of encoding/json.Marshaler +func (s DiagSeverity) MarshalJSON() ([]byte, error) { + switch s { + case DiagError: + return []byte(`"error"`), nil + case DiagWarning: + return []byte(`"warning"`), nil + default: + return []byte(`"invalid"`), nil + } +} + +func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics { + if len(diags) == 0 { + return nil + } + ret := make(Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = Diagnostic{ + Summary: diag.Summary, + Detail: diag.Detail, + } + switch diag.Severity { + case hcl.DiagError: + ret[i].Severity = DiagError + case hcl.DiagWarning: + ret[i].Severity = DiagWarning + } + if diag.Subject != nil { + pos := sourcePosHCL(*diag.Subject) + ret[i].Pos = &pos + } + } + return ret +} + +func diagnosticsError(err error) Diagnostics { + if err == nil { + return nil + } + + if posErr, ok := err.(*legacyhclparser.PosError); ok { + pos := sourcePosLegacyHCL(posErr.Pos, "") + return Diagnostics{ + Diagnostic{ + Severity: DiagError, + Summary: posErr.Err.Error(), + Pos: &pos, + }, + } + } + + return Diagnostics{ + Diagnostic{ + Severity: DiagError, + Summary: err.Error(), + }, + } +} + +func diagnosticsErrorf(format string, args ...interface{}) Diagnostics { + return diagnosticsError(fmt.Errorf(format, args...)) +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go new file mode 100644 index 000000000..1604a6e08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go @@ -0,0 +1,21 @@ +// Package tfconfig is a helper library that does careful, shallow parsing of +// Terraform modules to provide access to high-level metadata while +// remaining broadly compatible with configurations targeting various +// different Terraform versions. 
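As a brief illustration of the Diagnostics type above (a sketch only, not part of the vendored package): severities marshal to the strings "error" and "warning" via MarshalJSON, and Err returns a non-nil error only when at least one DiagError is present. The summaries and details below are invented for the example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	diags := tfconfig.Diagnostics{
		{Severity: tfconfig.DiagError, Summary: "Something failed", Detail: "Example detail."},
		{Severity: tfconfig.DiagWarning, Summary: "Heads up"},
	}

	// Severity values serialize as "error" / "warning".
	out, _ := json.MarshalIndent(diags, "", "  ")
	fmt.Println(string(out))

	// Err is non-nil because the slice contains a DiagError entry.
	if err := diags.Err(); err != nil {
		fmt.Println("error:", err)
	}
}
```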
+// +// This packge focuses on describing top-level objects only, and in particular +// does not attempt any sort of processing that would require access to plugins. +// Currently it allows callers to extract high-level information about +// variables, outputs, resource blocks, provider dependencies, and Terraform +// Core dependencies. +// +// This package only works at the level of single modules. A full configuration +// is a tree of potentially several modules, some of which may be references +// to remote packages. There are some basic helpers for traversing calls to +// modules at relative local paths, however. +// +// This package employs a "best effort" parsing strategy, producing as complete +// a result as possible even though the input may not be entirely valid. The +// intended use-case is high-level analysis and indexing of externally-facing +// module characteristics, as opposed to validating or even applying the module. +package tfconfig diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go new file mode 100644 index 000000000..faa93ed6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go @@ -0,0 +1,130 @@ +package tfconfig + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// LoadModule reads the directory at the given path and attempts to interpret +// it as a Terraform module. +func LoadModule(dir string) (*Module, Diagnostics) { + + // For broad compatibility here we actually have two separate loader + // codepaths. The main one uses the new HCL parser and API and is intended + // for configurations from Terraform 0.12 onwards (though will work for + // many older configurations too), but we'll also fall back on one that + // uses the _old_ HCL implementation so we can deal with some edge-cases + // that are not valid in new HCL. + + module, diags := loadModule(dir) + if diags.HasErrors() { + // Try using the legacy HCL parser and see if we fare better. + legacyModule, legacyDiags := loadModuleLegacyHCL(dir) + if !legacyDiags.HasErrors() { + legacyModule.init(legacyDiags) + return legacyModule, legacyDiags + } + } + + module.init(diags) + return module, diags +} + +// IsModuleDir checks if the given path contains terraform configuration files. +// This allows the caller to decide how to handle directories that do not have tf files. +func IsModuleDir(dir string) bool { + primaryPaths, _ := dirFiles(dir) + if len(primaryPaths) == 0 { + return false + } + return true +} + +func (m *Module) init(diags Diagnostics) { + // Fill in any additional provider requirements that are implied by + // resource configurations, to avoid the caller from needing to apply + // this logic itself. Implied requirements don't have version constraints, + // but we'll make sure the requirement value is still non-nil in this + // case so callers can easily recognize it. + for _, r := range m.ManagedResources { + if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { + m.RequiredProviders[r.Provider.Name] = []string{} + } + } + for _, r := range m.DataResources { + if _, exists := m.RequiredProviders[r.Provider.Name]; !exists { + m.RequiredProviders[r.Provider.Name] = []string{} + } + } + + // We redundantly also reference the diagnostics from inside the module + // object, primarily so that we can easily included in JSON-serialized + // versions of the module object. 
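For readers of this vendored package, a minimal usage sketch of LoadModule and IsModuleDir as defined above (illustrative only; the module path is hypothetical, and only fields referenced in these files, Variables and RequiredProviders, are accessed):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	dir := "./modules/example" // hypothetical path to a Terraform module

	if !tfconfig.IsModuleDir(dir) {
		log.Fatalf("%s contains no .tf or .tf.json files", dir)
	}

	mod, diags := tfconfig.LoadModule(dir)
	if diags.HasErrors() {
		// Best effort: mod is still populated as far as parsing got.
		log.Printf("problems while loading: %s", diags.Err())
	}

	for name, v := range mod.Variables {
		fmt.Printf("variable %q, type expression %q\n", name, v.Type)
	}
	for name, constraints := range mod.RequiredProviders {
		fmt.Printf("provider %q, version constraints %v\n", name, constraints)
	}
}
```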
+ m.Diagnostics = diags +} + +func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) { + infos, err := ioutil.ReadDir(dir) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read module directory", + Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), + }) + return + } + + var override []string + for _, info := range infos { + if info.IsDir() { + // We only care about files + continue + } + + name := info.Name() + ext := fileExt(name) + if ext == "" || isIgnoredFile(name) { + continue + } + + baseName := name[:len(name)-len(ext)] // strip extension + isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") + + fullPath := filepath.Join(dir, name) + if isOverride { + override = append(override, fullPath) + } else { + primary = append(primary, fullPath) + } + } + + // We are assuming that any _override files will be logically named, + // and processing the files in alphabetical order. Primaries first, then overrides. + primary = append(primary, override...) + + return +} + +// fileExt returns the Terraform configuration extension of the given +// path, or a blank string if it is not a recognized extension. +func fileExt(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +// isIgnoredFile returns true if the given filename (which must not have a +// directory path ahead of it) should be ignored as e.g. an editor swap file. +func isIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go new file mode 100644 index 000000000..9cb3aeef1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go @@ -0,0 +1,322 @@ +package tfconfig + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclparse" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func loadModule(dir string) (*Module, Diagnostics) { + mod := newModule(dir) + primaryPaths, diags := dirFiles(dir) + + parser := hclparse.NewParser() + + for _, filename := range primaryPaths { + var file *hcl.File + var fileDiags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, fileDiags = parser.ParseJSONFile(filename) + } else { + file, fileDiags = parser.ParseHCLFile(filename) + } + diags = append(diags, fileDiags...) + if file == nil { + continue + } + + content, _, contentDiags := file.Body.PartialContent(rootSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) + diags = append(diags, contentDiags...) + + if attr, defined := content.Attributes["required_version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) 
+ if !valDiags.HasErrors() { + mod.RequiredCore = append(mod.RequiredCore, version) + } + } + + for _, block := range content.Blocks { + // Our schema only allows required_providers here, so we + // assume that we'll only get that block type. + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + for name, attr := range attrs { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + } + + case "variable": + content, _, contentDiags := block.Body.PartialContent(variableSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + v := &Variable{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Variables[name] = v + + if attr, defined := content.Attributes["type"]; defined { + // We handle this particular attribute in a somewhat-tricky way: + // since Terraform may evolve its type expression syntax in + // future versions, we don't want to be overly-strict in how + // we handle it here, and so we'll instead just take the raw + // source provided by the user, using the source location + // information in the expression object. + // + // However, older versions of Terraform expected the type + // to be a string containing a keyword, so we'll need to + // handle that as a special case first for backward compatibility. + + var typeExpr string + + var typeExprAsStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) + if !valDiags.HasErrors() { + typeExpr = typeExprAsStr + } else { + + rng := attr.Expr.Range() + sourceFilename := rng.Filename + source, exists := parser.Sources()[sourceFilename] + if exists { + typeExpr = string(rng.SliceBytes(source)) + } else { + // This should never happen, so we'll just warn about it and leave the type unspecified. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Source code not available", + Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), + Subject: &block.DefRange, + }) + typeExpr = "" + } + + } + + v.Type = typeExpr + } + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + v.Description = description + } + + if attr, defined := content.Attributes["default"]; defined { + // To avoid the caller needing to deal with cty here, we'll + // use its JSON encoding to convert into an + // approximately-equivalent plain Go interface{} value + // to return. + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + if val.IsWhollyKnown() { // should only be false if there are errors in the input + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + // Should never happen, since all possible known + // values have a JSON mapping. + panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) + } + var def interface{} + err = json.Unmarshal(valJSON, &def) + if err != nil { + // Again should never happen, because valJSON is + // guaranteed valid by ctyjson.Marshal. + panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) + } + v.Default = def + } + } + + case "output": + + content, _, contentDiags := block.Body.PartialContent(outputSchema) + diags = append(diags, contentDiags...) 
+ + name := block.Labels[0] + o := &Output{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Outputs[name] = o + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + o.Description = description + } + + case "provider": + + content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + case "resource", "data": + + content, _, contentDiags := block.Body.PartialContent(resourceSchema) + diags = append(diags, contentDiags...) + + typeName := block.Labels[0] + name := block.Labels[1] + + r := &Resource{ + Type: typeName, + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + var resourcesMap map[string]*Resource + + switch block.Type { + case "resource": + r.Mode = ManagedResourceMode + resourcesMap = mod.ManagedResources + case "data": + r.Mode = DataResourceMode + resourcesMap = mod.DataResources + } + + key := r.MapKey() + + resourcesMap[key] = r + + if attr, defined := content.Attributes["provider"]; defined { + // New style here is to provide this as a naked traversal + // expression, but we also support quoted references for + // older configurations that predated this convention. + traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) + if travDiags.HasErrors() { + traversal = nil // in case we got any partial results + + // Fall back on trying to parse as a string + var travStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) + if !valDiags.HasErrors() { + var strDiags hcl.Diagnostics + traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) + if strDiags.HasErrors() { + traversal = nil + } + } + } + + // If we get out here with a nil traversal then we didn't + // succeed in processing the input. + if len(traversal) > 0 { + providerName := traversal.RootName() + alias := "" + if len(traversal) > 1 { + if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { + alias = getAttr.Name + } + } + r.Provider = ProviderRef{ + Name: providerName, + Alias: alias, + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider reference", + Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + // If provider _isn't_ set then we'll infer it from the + // resource type. + r.Provider = ProviderRef{ + Name: resourceTypeDefaultProviderName(r.Type), + } + } + + case "module": + + content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) + diags = append(diags, contentDiags...) 
+ + name := block.Labels[0] + mc := &ModuleCall{ + Name: block.Labels[0], + Pos: sourcePosHCL(block.DefRange), + } + + // check if this is overriding an existing module + var origSource string + if origMod, exists := mod.ModuleCalls[name]; exists { + origSource = origMod.Source + } + + mod.ModuleCalls[name] = mc + + if attr, defined := content.Attributes["source"]; defined { + var source string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) + diags = append(diags, valDiags...) + mc.Source = source + } + + if mc.Source == "" { + mc.Source = origSource + } + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + mc.Version = version + } + + default: + // Should never happen because our cases above should be + // exhaustive for our schema. + panic(fmt.Errorf("unhandled block type %q", block.Type)) + } + } + } + + return mod, diagnosticsHCL(diags) +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go new file mode 100644 index 000000000..86ffdf11d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go @@ -0,0 +1,325 @@ +package tfconfig + +import ( + "io/ioutil" + "strings" + + legacyhcl "github.com/hashicorp/hcl" + legacyast "github.com/hashicorp/hcl/hcl/ast" +) + +func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { + // This implementation is intentionally more quick-and-dirty than the + // main loader. In particular, it doesn't bother to keep careful track + // of multiple error messages because we always fall back on returning + // the main parser's error message if our fallback parsing produces + // an error, and thus the errors here are not seen by the end-caller. 
+ mod := newModule(dir) + + primaryPaths, diags := dirFiles(dir) + if diags.HasErrors() { + return mod, diagnosticsHCL(diags) + } + + for _, filename := range primaryPaths { + src, err := ioutil.ReadFile(filename) + if err != nil { + return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) + } + + hclRoot, err := legacyhcl.Parse(string(src)) + if err != nil { + return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) + } + + list, ok := hclRoot.Node.(*legacyast.ObjectList) + if !ok { + return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) + } + + for _, item := range list.Filter("terraform").Items { + if len(item.Keys) > 0 { + item = &legacyast.ObjectItem{ + Val: &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{item}, + }, + }, + } + } + + type TerraformBlock struct { + RequiredVersion string `hcl:"required_version"` + } + var block TerraformBlock + err = legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("terraform block: %s", err) + } + + if block.RequiredVersion != "" { + mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) + } + } + + if vars := list.Filter("variable"); len(vars.Items) > 0 { + vars = vars.Children() + type VariableBlock struct { + Type string `hcl:"type"` + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + for _, item := range vars.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block VariableBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) + } + + // Clean up legacy HCL decoding ambiguity by unwrapping list of maps + if ms, ok := block.Default.([]map[string]interface{}); ok { + def := make(map[string]interface{}) + for _, m := range ms { + for k, v := range m { + def[k] = v + } + } + block.Default = def + } + + v := &Variable{ + Name: name, + Type: block.Type, + Description: block.Description, + Default: block.Default, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Variables[name]; exists { + return nil, diagnosticsErrorf("duplicate variable block for %q", name) + } + mod.Variables[name] = v + + } + } + + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + outputs = outputs.Children() + type OutputBlock struct { + Description string + } + + for _, item := range outputs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block OutputBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) + } + + o := &Output{ + Name: name, + Description: block.Description, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Outputs[name]; exists { + return nil, diagnosticsErrorf("duplicate output block for %q", name) + } + mod.Outputs[name] = o + } + } + + for _, blockType := range []string{"resource", "data"} { + if resources := list.Filter(blockType); len(resources.Items) > 0 { + resources = resources.Children() + type ResourceBlock struct { + Provider string + } + + for _, item := range resources.Items 
{ + unwrapLegacyHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) + } + + typeName := item.Keys[0].Token.Value().(string) + name := item.Keys[1].Token.Value().(string) + var mode ResourceMode + var rMap map[string]*Resource + switch blockType { + case "resource": + mode = ManagedResourceMode + rMap = mod.ManagedResources + case "data": + mode = DataResourceMode + rMap = mod.DataResources + } + + var block ResourceBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) + } + + var providerName, providerAlias string + if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { + providerName = block.Provider[:dotPos] + providerAlias = block.Provider[dotPos+1:] + } else { + providerName = block.Provider + } + if providerName == "" { + providerName = resourceTypeDefaultProviderName(typeName) + } + + r := &Resource{ + Mode: mode, + Type: typeName, + Name: name, + Provider: ProviderRef{ + Name: providerName, + Alias: providerAlias, + }, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + key := r.MapKey() + if _, exists := rMap[key]; exists { + return nil, diagnosticsErrorf("duplicate resource block for %q", key) + } + rMap[key] = r + } + } + + } + + if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { + moduleCalls = moduleCalls.Children() + type ModuleBlock struct { + Source string + Version string + } + + for _, item := range moduleCalls.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ModuleBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) + } + + mc := &ModuleCall{ + Name: name, + Source: block.Source, + Version: block.Version, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + // it's possible this module call is from an override file + if origMod, exists := mod.ModuleCalls[name]; exists { + if mc.Source == "" { + mc.Source = origMod.Source + } + } + mod.ModuleCalls[name] = mc + } + } + + if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { + providerConfigs = providerConfigs.Children() + type ProviderBlock struct { + Version string + } + + for _, item := range providerConfigs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ProviderBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) + } + + if block.Version != "" { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version) + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + } + } + } + + return mod, nil +} + +// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". 
+// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{ + &legacyast.ObjectItem{ + Keys: []*legacyast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go new file mode 100644 index 000000000..65ddb2307 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go @@ -0,0 +1,35 @@ +package tfconfig + +// Module is the top-level type representing a parsed and processed Terraform +// module. +type Module struct { + // Path is the local filesystem directory where the module was loaded from. + Path string `json:"path"` + + Variables map[string]*Variable `json:"variables"` + Outputs map[string]*Output `json:"outputs"` + + RequiredCore []string `json:"required_core,omitempty"` + RequiredProviders map[string][]string `json:"required_providers"` + + ManagedResources map[string]*Resource `json:"managed_resources"` + DataResources map[string]*Resource `json:"data_resources"` + ModuleCalls map[string]*ModuleCall `json:"module_calls"` + + // Diagnostics records any errors and warnings that were detected during + // loading, primarily for inclusion in serialized forms of the module + // since this slice is also returned as a second argument from LoadModule. + Diagnostics Diagnostics `json:"diagnostics,omitempty"` +} + +func newModule(path string) *Module { + return &Module{ + Path: path, + Variables: make(map[string]*Variable), + Outputs: make(map[string]*Output), + RequiredProviders: make(map[string][]string), + ManagedResources: make(map[string]*Resource), + DataResources: make(map[string]*Resource), + ModuleCalls: make(map[string]*ModuleCall), + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go new file mode 100644 index 000000000..5e1e05a72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go @@ -0,0 +1,11 @@ +package tfconfig + +// ModuleCall represents a "module" block within a module. That is, a +// declaration of a child module from inside its parent. 
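A rough usage sketch, not part of the vendored sources: the Module type above is what callers get back from the package's LoadModule entry point, together with the Diagnostics slice its doc comment mentions. The module path below is a placeholder, and the Diagnostics value is assumed to be a plain slice as described above.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/terraform-config-inspect/tfconfig"
)

func main() {
	// "./modules/network" is a placeholder path to a local Terraform module.
	mod, diags := tfconfig.LoadModule("./modules/network")
	if len(diags) != 0 {
		log.Printf("diagnostics: %v", diags)
	}

	for name, v := range mod.Variables {
		fmt.Printf("variable %q (type %s) declared at %s:%d\n",
			name, v.Type, v.Pos.Filename, v.Pos.Line)
	}

	// The Module type's json tags make the whole result serializable.
	_ = json.NewEncoder(os.Stdout).Encode(mod)
}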
+type ModuleCall struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go new file mode 100644 index 000000000..890b25e69 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go @@ -0,0 +1,9 @@ +package tfconfig + +// Output represents a single output from a Terraform module. +type Output struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go new file mode 100644 index 000000000..d92483778 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go @@ -0,0 +1,9 @@ +package tfconfig + +// ProviderRef is a reference to a provider configuration within a module. +// It represents the contents of a "provider" argument in a resource, or +// a value in the "providers" map for a module call. +type ProviderRef struct { + Name string `json:"name"` + Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go new file mode 100644 index 000000000..401c8fce9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go @@ -0,0 +1,64 @@ +package tfconfig + +import ( + "fmt" + "strconv" + "strings" +) + +// Resource represents a single "resource" or "data" block within a module. +type Resource struct { + Mode ResourceMode `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + + Provider ProviderRef `json:"provider"` + + Pos SourcePos `json:"pos"` +} + +// MapKey returns a string that can be used to uniquely identify the receiver +// in a map[string]*Resource. +func (r *Resource) MapKey() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // should never happen + return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) + } +} + +// ResourceMode represents the "mode" of a resource, which is used to +// distinguish between managed resources ("resource" blocks in config) and +// data resources ("data" blocks in config). +type ResourceMode rune + +const InvalidResourceMode ResourceMode = 0 +const ManagedResourceMode ResourceMode = 'M' +const DataResourceMode ResourceMode = 'D' + +func (m ResourceMode) String() string { + switch m { + case ManagedResourceMode: + return "managed" + case DataResourceMode: + return "data" + default: + return "" + } +} + +// MarshalJSON implements encoding/json.Marshaler. 
+func (m ResourceMode) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(m.String())), nil +} + +func resourceTypeDefaultProviderName(typeName string) string { + if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { + return typeName[:underPos] + } + return typeName +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go new file mode 100644 index 000000000..fd6ca9e70 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go @@ -0,0 +1,106 @@ +package tfconfig + +import ( + "github.com/hashicorp/hcl/v2" +) + +var rootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + LabelNames: nil, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + }, +} + +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "required_providers", + }, + }, +} + +var providerConfigSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "version", + }, + { + Name: "alias", + }, + }, +} + +var variableSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "type", + }, + { + Name: "description", + }, + { + Name: "default", + }, + }, +} + +var outputSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + }, +} + +var moduleCallSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + }, + { + Name: "version", + }, + { + Name: "providers", + }, + }, +} + +var resourceSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "provider", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go new file mode 100644 index 000000000..548c9f9a3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go @@ -0,0 +1,50 @@ +package tfconfig + +import ( + legacyhcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/v2" +) + +// SourcePos is a pointer to a particular location in a source file. +// +// This type is embedded into other structs to allow callers to locate the +// definition of each described module element. The SourcePos of an element +// is usually the first line of its definition, although the definition can +// be a little "fuzzy" with JSON-based config files. +type SourcePos struct { + Filename string `json:"filename"` + Line int `json:"line"` +} + +func sourcePos(filename string, line int) SourcePos { + return SourcePos{ + Filename: filename, + Line: line, + } +} + +func sourcePosHCL(rng hcl.Range) SourcePos { + // We intentionally throw away the column information here because + // current and legacy HCL both disagree on the definition of a column + // and so a line-only reference is the best granularity we can do + // such that the result is consistent between both parsers. 
+ return SourcePos{ + Filename: rng.Filename, + Line: rng.Start.Line, + } +} + +func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { + useFilename := pos.Filename + // We'll try to use the filename given in legacy HCL position, but + // in practice there's no way to actually get this populated via + // the HCL API so it's usually empty except in some specialized + // situations, such as positions in error objects. + if useFilename == "" { + useFilename = filename + } + return SourcePos{ + Filename: useFilename, + Line: pos.Line, + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go new file mode 100644 index 000000000..0f73fc995 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go @@ -0,0 +1,16 @@ +package tfconfig + +// Variable represents a single variable from a Terraform module. +type Variable struct { + Name string `json:"name"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + + // Default is an approximate representation of the default value in + // the native Go type system. The conversion from the value given in + // configuration may be slightly lossy. Only values that can be + // serialized by json.Marshal will be included here. + Default interface{} `json:"default,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go new file mode 100644 index 000000000..9d31031a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/acctest.go @@ -0,0 +1,2 @@ +// Package acctest contains for Terraform Acceptance Tests +package acctest diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go new file mode 100644 index 000000000..258e4db70 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/random.go @@ -0,0 +1,176 @@ +package acctest + +import ( + "bytes" + crand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "math/rand" + "net" + "strings" + "time" + + "golang.org/x/crypto/ssh" + + "github.com/apparentlymart/go-cidr/cidr" +) + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +// Helpers for generating random tidbits for use in identifiers to prevent +// collisions in acceptance tests. + +// RandInt generates a random integer +func RandInt() int { + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() +} + +// RandomWithPrefix is used to generate a unique name with a prefix, for +// randomizing names in acceptance tests +func RandomWithPrefix(name string) string { + return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +} + +func RandIntRange(min int, max int) int { + source := rand.New(rand.NewSource(time.Now().UnixNano())) + rangeMax := max - min + + return int(source.Int31n(int32(rangeMax))) +} + +// RandString generates a random alphanumeric string of the length specified +func RandString(strlen int) string { + return RandStringFromCharSet(strlen, CharSetAlphaNum) +} + +// RandStringFromCharSet generates a random string by selecting characters from +// the charset provided +func RandStringFromCharSet(strlen int, charSet string) string { + result := make([]byte, strlen) + for i := 0; i < strlen; i++ { + result[i] = charSet[rand.Intn(len(charSet))] + } + return string(result) +} + +// RandSSHKeyPair generates a public and private SSH key pair. The public key is +// returned in OpenSSH format, and the private key is PEM encoded. +func RandSSHKeyPair(comment string) (string, string, error) { + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return "", "", err + } + keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey))) + return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil +} + +// RandTLSCert generates a self-signed TLS certificate with a newly created +// private key, and returns both the cert and the private key PEM encoded. 
+func RandTLSCert(orgName string) (string, string, error) { + template := &x509.Certificate{ + SerialNumber: big.NewInt(int64(RandInt())), + Subject: pkix.Name{ + Organization: []string{orgName}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(24 * time.Hour), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + privateKey, privateKeyPEM, err := genPrivateKey() + if err != nil { + return "", "", err + } + + cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey) + if err != nil { + return "", "", err + } + + certPEM, err := pemEncode(cert, "CERTIFICATE") + if err != nil { + return "", "", err + } + + return certPEM, privateKeyPEM, nil +} + +// RandIpAddress returns a random IP address in the specified CIDR block. +// The prefix length must be less than 31. +func RandIpAddress(s string) (string, error) { + _, network, err := net.ParseCIDR(s) + if err != nil { + return "", err + } + + firstIp, lastIp := cidr.AddressRange(network) + first := &big.Int{} + first.SetBytes([]byte(firstIp)) + last := &big.Int{} + last.SetBytes([]byte(lastIp)) + r := &big.Int{} + r.Sub(last, first) + if len := r.BitLen(); len > 31 { + return "", fmt.Errorf("CIDR range is too large: %d", len) + } + + max := int(r.Int64()) + if max == 0 { + // panic: invalid argument to Int31n + return firstIp.String(), nil + } + + host, err := cidr.Host(network, RandIntRange(0, max)) + if err != nil { + return "", err + } + + return host.String(), nil +} + +func genPrivateKey() (*rsa.PrivateKey, string, error) { + privateKey, err := rsa.GenerateKey(crand.Reader, 1024) + if err != nil { + return nil, "", err + } + + privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY") + if err != nil { + return nil, "", err + } + + return privateKey, privateKeyPEM, nil +} + +func pemEncode(b []byte, block string) (string, error) { + var buf bytes.Buffer + pb := &pem.Block{Type: block, Bytes: b} + if err := pem.Encode(&buf, pb); err != nil { + return "", err + } + + return buf.String(), nil +} + +const ( + // CharSetAlphaNum is the alphanumeric character set for use with + // RandStringFromCharSet + CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz012346789" + + // CharSetAlpha is the alphabetical character set for use with + // RandStringFromCharSet + CharSetAlpha = "abcdefghijklmnopqrstuvwxyz" +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go new file mode 100644 index 000000000..87c60b8be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/acctest/remotetests.go @@ -0,0 +1,27 @@ +package acctest + +import ( + "net/http" + "os" + "testing" +) + +// SkipRemoteTestsEnvVar is an environment variable that can be set by a user +// running the tests in an environment with limited network connectivity. By +// default, tests requiring internet connectivity make an effort to skip if no +// internet is available, but in some cases the smoke test will pass even +// though the test should still be skipped. +const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS" + +// RemoteTestPrecheck is meant to be run by any unit test that requires +// outbound internet connectivity. The test will be skipped if it's +// unavailable. 
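A brief sketch, not part of the vendored package, of how a provider's acceptance test might combine the randomization helpers above to avoid name collisions; the resource type and attribute are invented for illustration.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
)

// testAccExampleBucketConfig renders a test configuration whose names cannot
// collide across repeated or concurrent test runs. "example_bucket" is a
// hypothetical resource type.
func testAccExampleBucketConfig() string {
	name := acctest.RandomWithPrefix("tf-acc-test")                  // prefix plus a random integer
	suffix := acctest.RandStringFromCharSet(6, acctest.CharSetAlpha) // six random letters

	return fmt.Sprintf(`
resource "example_bucket" "test" {
  name = "%s-%s"
}
`, name, suffix)
}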
+func RemoteTestPrecheck(t *testing.T) { + if os.Getenv(SkipRemoteTestsEnvVar) != "" { + t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar) + } + + if _, err := http.Get("http://google.com"); err != nil { + t.Skipf("skipping, internet seems to not be available: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go new file mode 100644 index 000000000..6ccc52318 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/hashcode/hashcode.go @@ -0,0 +1,35 @@ +package hashcode + +import ( + "bytes" + "fmt" + "hash/crc32" +) + +// String hashes a string to a unique hashcode. +// +// crc32 returns a uint32, but for our use we need +// and non negative integer. Here we cast to an integer +// and invert it if the result is negative. +func String(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} + +// Strings hashes a list of strings to a unique hashcode. +func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go new file mode 100644 index 000000000..6bd92f777 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/logging.go @@ -0,0 +1,100 @@ +package logging + +import ( + "io" + "io/ioutil" + "log" + "os" + "strings" + "syscall" + + "github.com/hashicorp/logutils" +) + +// These are the environmental variables that determine if we log, and if +// we log whether or not the log should go to a file. +const ( + EnvLog = "TF_LOG" // Set to True + EnvLogFile = "TF_LOG_PATH" // Set to a file +) + +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} + +// LogOutput determines where we should send logs (if anywhere) and the log level. +func LogOutput() (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + if logPath := os.Getenv(EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// SetOutput checks for a log destination with LogOutput, and calls +// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses +// ioutil.Discard. Any error from LogOutout is fatal. +func SetOutput() { + out, err := LogOutput() + if err != nil { + log.Fatal(err) + } + + if out == nil { + out = ioutil.Discard + } + + log.SetOutput(out) +} + +// LogLevel returns the current log level string based the environment vars +func LogLevel() string { + envLevel := os.Getenv(EnvLog) + if envLevel == "" { + return "" + } + + logLevel := "TRACE" + if isValidLogLevel(envLevel) { + // allow following for better ux: info, Info or INFO + logLevel = strings.ToUpper(envLevel) + } else { + log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. 
Valid levels are: %+v", + envLevel, ValidLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level := string(LogLevel()) + return level == "DEBUG" || level == "TRACE" +} + +func isValidLogLevel(level string) bool { + for _, l := range ValidLevels { + if strings.ToUpper(level) == string(l) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go new file mode 100644 index 000000000..bddabe647 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/logging/transport.go @@ -0,0 +1,70 @@ +package logging + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "net/http/httputil" + "strings" +) + +type transport struct { + name string + transport http.RoundTripper +} + +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + if IsDebugOrHigher() { + reqData, err := httputil.DumpRequestOut(req, true) + if err == nil { + log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) + } else { + log.Printf("[ERROR] %s API Request error: %#v", t.name, err) + } + } + + resp, err := t.transport.RoundTrip(req) + if err != nil { + return resp, err + } + + if IsDebugOrHigher() { + respData, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) + } else { + log.Printf("[ERROR] %s API Response error: %#v", t.name, err) + } + } + + return resp, nil +} + +func NewTransport(name string, t http.RoundTripper) *transport { + return &transport{name, t} +} + +// prettyPrintJsonLines iterates through a []byte line-by-line, +// transforming any lines that are complete json into pretty-printed json. 
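A minimal sketch, not part of the vendored file, showing how a provider's API client might wire the logging round tripper above into an HTTP client; the "Example" label and the helper name are illustrative.

package example

import (
	"net/http"

	"github.com/hashicorp/terraform-plugin-sdk/helper/logging"
)

// newAPIClient wraps the default transport with the logging round tripper
// defined above: request and response dumps appear only when TF_LOG is at
// DEBUG or TRACE, and the label "Example" just tags the log lines.
func newAPIClient() *http.Client {
	return &http.Client{
		Transport: logging.NewTransport("Example", http.DefaultTransport),
	}
}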
+func prettyPrintJsonLines(b []byte) string { + parts := strings.Split(string(b), "\n") + for i, p := range parts { + if b := []byte(p); json.Valid(b) { + var out bytes.Buffer + json.Indent(&out, b, "", " ") + parts[i] = out.String() + } + } + return strings.Join(parts, "\n") +} + +const logReqMsg = `%s API Request Details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +const logRespMsg = `%s API Response Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go new file mode 100644 index 000000000..7ee21614b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/error.go @@ -0,0 +1,79 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go new file mode 100644 index 000000000..db12cee20 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/grpc_test_provider.go @@ -0,0 +1,43 @@ +package resource + +import ( + "context" + "net" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + tfplugin "github.com/hashicorp/terraform-plugin-sdk/plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// 
GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC +// shim and starts it in a grpc server using an inmem connection. It returns a +// GRPCClient for this new server to test the shimmed resource provider. +func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface { + listener := bufconn.Listen(256 * 1024) + grpcServer := grpc.NewServer() + + p := plugin.NewGRPCProviderServerShim(rp) + proto.RegisterProviderServer(grpcServer, p) + + go grpcServer.Serve(listener) + + conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) { + return listener.Dial() + }), grpc.WithInsecure()) + if err != nil { + panic(err) + } + + var pp tfplugin.GRPCProviderPlugin + client, _ := pp.GRPCClient(context.Background(), nil, conn) + + grpcClient := client.(*tfplugin.GRPCProvider) + grpcClient.TestServer = grpcServer + + return grpcClient +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go new file mode 100644 index 000000000..44949550e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/id.go @@ -0,0 +1,45 @@ +package resource + +import ( + "fmt" + "strings" + "sync" + "time" +) + +const UniqueIdPrefix = `terraform-` + +// idCounter is a monotonic counter for generating ordered unique ids. +var idMutex sync.Mutex +var idCounter uint32 + +// Helper for a resource to generate a unique identifier w/ default prefix +func UniqueId() string { + return PrefixedUniqueId(UniqueIdPrefix) +} + +// UniqueIDSuffixLength is the string length of the suffix generated by +// PrefixedUniqueId. This can be used by length validation functions to +// ensure prefixes are the correct length for the target field. +const UniqueIDSuffixLength = 26 + +// Helper for a resource to generate a unique identifier w/ given prefix +// +// After the prefix, the ID consists of an incrementing 26 digit value (to match +// previous timestamp output). After the prefix, the ID consists of a timestamp +// and an incrementing 8 hex digit value The timestamp means that multiple IDs +// created with the same prefix will sort in the order of their creation, even +// across multiple terraform executions, as long as the clock is not turned back +// between calls, and as long as any given terraform execution generates fewer +// than 4 billion IDs. +func PrefixedUniqueId(prefix string) string { + // Be precise to 4 digits of fractional seconds, but remove the dot before the + // fractional seconds. + timestamp := strings.Replace( + time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) + + idMutex.Lock() + defer idMutex.Unlock() + idCounter++ + return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go new file mode 100644 index 000000000..02a993d69 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/map.go @@ -0,0 +1,140 @@ +package resource + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// Map is a map of resources that are supported, and provides helpers for +// more easily implementing a ResourceProvider. 
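A rough sketch, not part of the vendored sources, of the unique-ID helpers defined above; the "db-" prefix and the helper function are arbitrary examples.

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// printIDs shows the shape of the generated identifiers: a caller-chosen
// prefix, a UTC timestamp precise to four fractional digits (dot removed),
// then an eight-hex-digit counter, so IDs created later with the same prefix
// sort after earlier ones.
func printIDs() {
	fmt.Println(resource.UniqueId())              // uses the default "terraform-" prefix
	fmt.Println(resource.PrefixedUniqueId("db-")) // "db-" + timestamp + counter
	fmt.Println(resource.PrefixedUniqueId("db-")) // sorts after the previous ID
}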
+type Map struct { + Mapping map[string]Resource +} + +func (m *Map) Validate( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := m.Mapping[t] + if !ok { + return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} + } + + // If there is no validator set, then it is valid + if r.ConfigValidator == nil { + return nil, nil + } + + return r.ConfigValidator.Validate(c) +} + +// Apply performs a create or update depending on the diff, and calls +// the proper function on the matching Resource. +func (m *Map) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource if it is created + err := r.Destroy(s, meta) + if err != nil { + return s, err + } + + s.ID = "" + } + + // If we're only destroying, and not creating, then return now. + // Otherwise, we continue so that we can create a new resource. + if !d.RequiresNew() { + return nil, nil + } + } + + var result *terraform.InstanceState + var err error + if s.ID == "" { + result, err = r.Create(s, d, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf( + "Resource type '%s' doesn't support update", + info.Type) + } + + result, err = r.Update(s, d, meta) + } + if result != nil { + if result.Attributes == nil { + result.Attributes = make(map[string]string) + } + + result.Attributes["id"] = result.ID + } + + return result, err +} + +// Diff performs a diff on the proper resource type. +func (m *Map) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, meta) +} + +// Refresh performs a Refresh on the proper resource type. +// +// Refresh on the Resource won't be called if the state represents a +// non-created resource (ID is blank). +// +// An error is returned if the resource isn't registered. +func (m *Map) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the resource isn't created, don't refresh. + if s.ID == "" { + return s, nil + } + + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Refresh(s, meta) +} + +// Resources returns all the resources that are supported by this +// resource map and can be used to satisfy the Resources method of +// a ResourceProvider. 
+func (m *Map) Resources() []terraform.ResourceType { + ks := make([]string, 0, len(m.Mapping)) + for k, _ := range m.Mapping { + ks = append(ks, k) + } + sort.Strings(ks) + + rs := make([]terraform.ResourceType, 0, len(m.Mapping)) + for _, k := range ks { + rs = append(rs, terraform.ResourceType{ + Name: k, + }) + } + + return rs +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go new file mode 100644 index 000000000..80782413b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/resource.go @@ -0,0 +1,49 @@ +package resource + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/config" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +type Resource struct { + ConfigValidator *config.Validator + Create CreateFunc + Destroy DestroyFunc + Diff DiffFunc + Refresh RefreshFunc + Update UpdateFunc +} + +// CreateFunc is a function that creates a resource that didn't previously +// exist. +type CreateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) + +// DestroyFunc is a function that destroys a resource that previously +// exists using the state. +type DestroyFunc func( + *terraform.InstanceState, + interface{}) error + +// DiffFunc is a function that performs a diff of a resource. +type DiffFunc func( + *terraform.InstanceState, + *terraform.ResourceConfig, + interface{}) (*terraform.InstanceDiff, error) + +// RefreshFunc is a function that performs a refresh of a specific type +// of resource. +type RefreshFunc func( + *terraform.InstanceState, + interface{}) (*terraform.InstanceState, error) + +// UpdateFunc is a function that is called to update a resource that +// previously existed. The difference between this and CreateFunc is that +// the diff is guaranteed to only contain attributes that don't require +// a new resource. +type UpdateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go new file mode 100644 index 000000000..88a839664 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state.go @@ -0,0 +1,259 @@ +package resource + +import ( + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. 
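A minimal sketch of how provider code typically drives StateChangeConf and WaitForState (defined below) to poll a remote object until it reaches a target state; the widget type, getWidget client call, and the "creating"/"available" state names are hypothetical stand-ins for a real API.

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// widget and getWidget stand in for a real API object and client call.
type widget struct{ ID, Status string }

func getWidget(id string) (*widget, error) {
	return &widget{ID: id, Status: "available"}, nil
}

func waitForWidgetAvailable(id string) (*widget, error) {
	conf := &resource.StateChangeConf{
		Pending:    []string{"creating"},
		Target:     []string{"available"},
		Timeout:    10 * time.Minute,
		MinTimeout: 5 * time.Second,
		Refresh: func() (interface{}, string, error) {
			// Return the latest object, its current state, and any error.
			w, err := getWidget(id)
			if err != nil {
				return nil, "", err
			}
			return w, w.Status, nil
		},
	}

	raw, err := conf.WaitForState()
	if err != nil {
		return nil, fmt.Errorf("waiting for widget %q: %s", id, err)
	}
	return raw.(*widget), nil
}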
+type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + time.Sleep(conf.Delay) + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. 
+ notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go new file mode 100644 index 000000000..1e3225933 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/state_shim.go @@ -0,0 +1,188 @@ +package resource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// shimState takes a new *states.State and reverts it to a legacy state for the provider ACC tests +func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { + state := terraform.NewState() + + // in the odd case of a nil state, let the helper packages handle it + if newState == nil { + return nil, nil + } + + for _, newMod := range newState.Modules { + mod := state.AddModule(newMod.Addr) + + for name, out := range newMod.OutputValues { + outputType := "" + val := hcl2shim.ConfigValueFromHCL2(out.Value) + ty := out.Value.Type() + switch { + case ty == cty.String: + outputType = "string" + case ty.IsTupleType() || ty.IsListType(): + outputType = "list" + case ty.IsMapType(): + outputType = "map" + } + + mod.Outputs[name] = &terraform.OutputState{ + Type: outputType, + Value: val, + Sensitive: out.Sensitive, + } + } + + for _, res := range newMod.Resources { + resType := res.Addr.Type + providerType := res.ProviderConfig.ProviderConfig.Type + + resource := getResource(providers, providerType, res.Addr) + + for key, i := range res.Instances { + resState := &terraform.ResourceState{ + Type: resType, + Provider: res.ProviderConfig.String(), + } + + // We should always have a Current instance here, but be safe about checking. 
+ if i.Current != nil { + flatmap, err := shimmedAttributes(i.Current, resource) + if err != nil { + return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if i.Current.Private != nil { + err := json.Unmarshal(i.Current.Private, &meta) + if err != nil { + return nil, err + } + } + + resState.Primary = &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: i.Current.Status == states.ObjectTainted, + Meta: meta, + } + + if i.Current.SchemaVersion != 0 { + if resState.Primary.Meta == nil { + resState.Primary.Meta = map[string]interface{}{} + } + resState.Primary.Meta["schema_version"] = i.Current.SchemaVersion + } + + for _, dep := range i.Current.Dependencies { + resState.Dependencies = append(resState.Dependencies, dep.String()) + } + + // convert the indexes to the old style flapmap indexes + idx := "" + switch key.(type) { + case addrs.IntKey: + // don't add numeric index values to resources with a count of 0 + if len(res.Instances) > 1 { + idx = fmt.Sprintf(".%d", key) + } + case addrs.StringKey: + idx = "." + key.String() + } + + mod.Resources[res.Addr.String()+idx] = resState + } + + // add any deposed instances + for _, dep := range i.Deposed { + flatmap, err := shimmedAttributes(dep, resource) + if err != nil { + return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if dep.Private != nil { + err := json.Unmarshal(dep.Private, &meta) + if err != nil { + return nil, err + } + } + + deposed := &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: dep.Status == states.ObjectTainted, + Meta: meta, + } + if dep.SchemaVersion != 0 { + deposed.Meta = map[string]interface{}{ + "schema_version": dep.SchemaVersion, + } + } + + resState.Deposed = append(resState.Deposed, deposed) + } + } + } + } + + return state, nil +} + +func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { + p := providers[providerName] + if p == nil { + panic(fmt.Sprintf("provider %q not found in test step", providerName)) + } + + // this is only for tests, so should only see schema.Providers + provider := p.(*schema.Provider) + + switch addr.Mode { + case addrs.ManagedResourceMode: + resource := provider.ResourcesMap[addr.Type] + if resource != nil { + return resource + } + case addrs.DataResourceMode: + resource := provider.DataSourcesMap[addr.Type] + if resource != nil { + return resource + } + } + + panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) +} + +func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { + flatmap := instance.AttrsFlat + if flatmap != nil { + return flatmap, nil + } + + // if we have json attrs, they need to be decoded + rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) + if err != nil { + return nil, err + } + + instanceState, err := res.ShimInstanceStateFromValue(rio.Value) + if err != nil { + return nil, err + } + + return instanceState.Attributes, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go new file mode 100644 index 000000000..3f3e02a79 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing.go @@ -0,0 +1,1392 @@ +package resource + +import ( + "bytes" + "errors" + "flag" + "fmt" + "io" + 
"io/ioutil" + "log" + "os" + "path/filepath" + "reflect" + "regexp" + "strings" + "syscall" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/logutils" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform-plugin-sdk/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/command/format" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload" + "github.com/hashicorp/terraform-plugin-sdk/internal/initwd" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma seperated list of regions to for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are ran +// sequentially, however they can list dependencies to be ran first. We track +// the sweepers that have been ran, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") +var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// type SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be ran in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. Must be unique to be ran by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be ran + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only ran once per run. 
+func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m *testing.M) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + + if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { + os.Exit(1) + } + } else { + os.Exit(m.Run()) + } +} + +func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { + var sweeperErrorFound bool + sweeperRunList := make(map[string]map[string]error) + + for _, region := range regions { + region = strings.TrimSpace(region) + + var regionSweeperErrorFound bool + regionSweeperRunList := make(map[string]error) + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { + if allowFailures { + continue + } + + sweeperRunList[region] = regionSweeperRunList + return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) + } + } + + log.Printf("Sweeper Tests ran successfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr == nil { + fmt.Printf("\t- %s\n", sweeper) + } else { + regionSweeperErrorFound = true + } + } + + if regionSweeperErrorFound { + sweeperErrorFound = true + log.Printf("Sweeper Tests ran unsuccessfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr != nil { + fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + } + } + } + + sweeperRunList[region] = regionSweeperRunList + } + + if sweeperErrorFound { + return sweeperRunList, errors.New("at least one sweeper failed") + } + + return sweeperRunList, nil +} + +// filterSweepers takes a comma seperated string listing the names of sweepers +// to be ran, and returns a filtered set from the list of all of sweepers to +// run based on the names given. +func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + for foundName, foundSweeper := range filterSweeperWithDependencies(name, source) { + sweepers[foundName] = foundSweeper + } + } + } + } + return sweepers +} + +// filterSweeperWithDependencies recursively returns sweeper and all dependencies. +// Since filterSweepers performs fuzzy matching, this function is used +// to perform exact sweeper and dependency lookup. 
+func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[string]*Sweeper { + result := make(map[string]*Sweeper) + + currentSweeper, ok := source[name] + if !ok { + log.Printf("[WARN] Sweeper has dependency (%s), but that sweeper was not found", name) + return result + } + + result[name] = currentSweeper + + for _, dependency := range currentSweeper.Dependencies { + for foundName, foundSweeper := range filterSweeperWithDependencies(dependency, source) { + result[foundName] = foundSweeper + } + } + + return result +} + +// runSweeperWithRegion recieves a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper fun with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { + for _, dep := range s.Dependencies { + if depSweeper, ok := sweepers[dep]; ok { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue + } + + return err + } + } else { + log.Printf("[WARN] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) + + runE := s.F(region) + + sweeperRunList[s.Name] = runE + + if runE != nil { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) + } + + return runE +} + +const TestEnvVar = "TF_ACC" + +// TestProvider can be implemented by any ResourceProvider to provide custom +// reset functionality at the start of an acceptance test. +// The helper/schema Provider implements this interface. +type TestProvider interface { + TestReset() error +} + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or in the case of being after a destroy, it is the last known state when +// it was created. +type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. +type ImportStateIdFunc func(*terraform.State) (string, error) + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g. remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. 
It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are setup. + PreCheck func() + + // Providers is the ResourceProvider that will be under test. + // + // Alternately, ProviderFactories can be specified for the providers + // that are valid. This takes priority over Providers. + // + // The end effect of each is the same: specifying the providers that + // are used within the tests. + Providers map[string]terraform.ResourceProvider + ProviderFactories map[string]terraform.ResourceProviderFactory + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that tests that a refresh can be + // refreshed with only an ID to result in the same attributes. + // This validates completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simply create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. 
If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. 
These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string + + // provider s is used internally to maintain a reference to the + // underlying providers during the tests + providers map[string]terraform.ResourceProvider +} + +// Set to a file mask in sprintf format where %s is test name +const EnvLogPathMask = "TF_LOG_PATH_MASK" + +func LogOutput(t TestT) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := logging.LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + + if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: logging.ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t TestT, c TestCase) { + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environmental variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. +func Test(t TestT, c TestCase) { + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. You can opt out + // of this with OverrideEnvVar on individual TestCases. + if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + TestEnvVar)) + return + } + + logWriter, err := LogOutput(t) + if err != nil { + t.Error(fmt.Errorf("error setting up logging: %s", err)) + } + log.SetOutput(logWriter) + + // We require verbose mode so that the user knows what is going on. + if !testTesting && !testing.Verbose() && !c.IsUnitTest { + t.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + // get instances of all providers, so we can use the individual + // resources to shim the state during the tests. 
+ providers := make(map[string]terraform.ResourceProvider) + for name, pf := range testProviderFactories(c) { + p, err := pf() + if err != nil { + t.Fatal(err) + } + providers[name] = p + } + + providerResolver, err := testProviderResolver(c) + if err != nil { + t.Fatal(err) + } + + opts := terraform.ContextOpts{ProviderResolver: providerResolver} + + // A single state variable to track the lifecycle, starting with no state + var state *terraform.State + + // Go through each step and run it + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + errored := false + for i, step := range c.Steps { + // insert the providers into the step so we can get the resources for + // shimming the state + step.providers = providers + + var err error + log.Printf("[DEBUG] Test: Executing step %d", i) + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d", i) + continue + } + } + + if step.Config == "" && !step.ImportState { + err = fmt.Errorf( + "unknown test mode for step. Please see TestStep docs\n\n%#v", + step) + } else { + if step.ImportState { + if step.Config == "" { + step.Config = testProviderConfig(c) + } + + // Can optionally set step.Config in addition to + // step.ImportState, to provide config for the import. + state, err = testStepImportState(opts, state, step) + } else { + state, err = testStepConfig(opts, state, step) + } + } + + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break + } + + // If there was an error, exit + if err != nil { + // Perhaps we expected an error? Check if it matches + if step.ExpectError != nil { + if !step.ExpectError.MatchString(err.Error()) { + errored = true + t.Error(fmt.Sprintf( + "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", + i, err, step.ExpectError)) + break + } + } else { + errored = true + t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) + break + } + } + + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. + if idRefreshCheck != nil { + log.Printf( + "[WARN] Test: Running ID-only refresh check on %s", + idRefreshCheck.Primary.ID) + if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { + log.Printf("[ERROR] Test: ID-only test failed: %s", err) + t.Error(fmt.Sprintf( + "[ERROR] Test: ID-only test failed: %s", err)) + break + } + } + } + } + + // If we never checked an id-only refresh, it is a failure. 
+ if idRefresh { + if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { + t.Error("ID-only refresh check never ran.") + } + } + + // If we have a state, then run the destroy + if state != nil { + lastStep := c.Steps[len(c.Steps)-1] + destroyStep := TestStep{ + Config: lastStep.Config, + Check: c.CheckDestroy, + Destroy: true, + PreventDiskCleanup: lastStep.PreventDiskCleanup, + PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + providers: providers, + } + + log.Printf("[WARN] Test: Executing destroy step") + state, err := testStep(opts, state, destroyStep) + if err != nil { + t.Error(fmt.Sprintf( + "Error destroying resource! WARNING: Dangling resources\n"+ + "may exist. The full state and error is shown below.\n\n"+ + "Error: %s\n\nState: %s", + err, + state)) + } + } else { + log.Printf("[WARN] Skipping destroy test since there is no state.") + } +} + +// testProviderConfig takes the list of Providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. +func testProviderConfig(c TestCase) string { + var lines []string + for p := range c.Providers { + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + } + + return strings.Join(lines, "") +} + +// testProviderFactories combines the fixed Providers and +// ResourceProviderFactory functions into a single map of +// ResourceProviderFactory functions. +func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { + ctxProviders := make(map[string]terraform.ResourceProviderFactory) + for k, pf := range c.ProviderFactories { + ctxProviders[k] = pf + } + + // add any fixed providers + for k, p := range c.Providers { + ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) + } + return ctxProviders +} + +// testProviderResolver is a helper to build a ResourceProviderResolver +// with pre instantiated ResourceProviders, so that we can reset them for the +// test, while only calling the factory function once. +// Any errors are stored so that they can be returned by the factory in +// terraform to match non-test behavior. +func testProviderResolver(c TestCase) (providers.Resolver, error) { + ctxProviders := testProviderFactories(c) + + // wrap the old provider factories in the test grpc server so they can be + // called from terraform. + newProviders := make(map[string]providers.Factory) + + for k, pf := range ctxProviders { + factory := pf // must copy to ensure each closure sees its own value + newProviders[k] = func() (providers.Interface, error) { + p, err := factory() + if err != nil { + return nil, err + } + + // The provider is wrapped in a GRPCTestProvider so that it can be + // passed back to terraform core as a providers.Interface, rather + // than the legacy ResourceProvider. + return GRPCTestProvider(p), nil + } + } + + return providers.ResolverFixed(newProviders), nil +} + +// UnitTest is a helper to force the acceptance testing harness to run in the +// normal unit test suite. This should only be used for resource that don't +// have any external dependencies. +func UnitTest(t TestT, c TestCase) { + c.IsUnitTest = true + Test(t, c) +} + +func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error { + // TODO: We guard by this right now so master doesn't explode. We + // need to remove this eventually to make this part of the normal tests. 
+ if os.Getenv("TF_ACC_IDONLY") == "" { + return nil + } + + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: r.Type, + Name: "foo", + }.Instance(addrs.NoKey) + absAddr := addr.Absolute(addrs.RootModuleInstance) + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := states.NewState() + state.RootModule().SetResourceInstanceCurrent( + addr, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: r.Primary.Attributes, + Status: states.ObjectReady, + }, + addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), + ) + + // Create the config module. We use the full config because Refresh + // doesn't have access to it and we may need things like provider + // configurations. The initial implementation of id-only checks used + // an empty config module, but that caused the aforementioned problems. + cfg, err := testConfig(opts, step) + if err != nil { + return err + } + + // Initialize the context + opts.Config = cfg + opts.State = state + ctx, ctxDiags := terraform.NewContext(&opts) + if ctxDiags.HasErrors() { + return ctxDiags.Err() + } + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) + } + + // Refresh! + state, refreshDiags := ctx.Refresh() + if refreshDiags.HasErrors() { + return refreshDiags.Err() + } + + // Verify attribute equivalence. + actualR := state.ResourceInstance(absAddr) + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Current == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Current.AttrsFlat + expected := r.Primary.Attributes + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k, _ := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k, _ := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return fmt.Errorf( + "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + + return nil +} + +func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { + if step.PreConfig != nil { + step.PreConfig() + } + + cfgPath, err := ioutil.TempDir("", "tf-test") + if err != nil { + return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) + } + + if step.PreventDiskCleanup { + log.Printf("[INFO] Skipping defer os.RemoveAll call") + } else { + defer os.RemoveAll(cfgPath) + } + + // Write the main configuration file + err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating temporary file for config: %s", err) + } + + // Create directory for our child modules, if any. 
+ modulesDir := filepath.Join(cfgPath, ".modules") + err = os.Mkdir(modulesDir, os.ModePerm) + if err != nil { + return nil, fmt.Errorf("Error creating child modules directory: %s", err) + } + + inst := initwd.NewModuleInstaller(modulesDir, nil) + _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) + if installDiags.HasErrors() { + return nil, installDiags.Err() + } + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) + if err != nil { + return nil, fmt.Errorf("failed to create config loader: %s", err) + } + + config, configDiags := loader.LoadConfig(cfgPath) + if configDiags.HasErrors() { + return nil, configDiags + } + + return config, nil +} + +func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { + if c.ResourceName == "" { + return nil, fmt.Errorf("ResourceName must be set in TestStep") + } + + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.ResourceName]; ok { + return v, nil + } + } + } + + return nil, fmt.Errorf( + "Resource specified by ResourceName couldn't be found: %s", c.ResourceName) +} + +// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + for i, f := range fs { + if err := f(s); err != nil { + return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err) + } + } + + return nil + } +} + +// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into +// a single TestCheckFunc. +// +// As a user testing their provider, this lets you decompose your checks +// into smaller pieces more easily. +// +// Unlike ComposeTestCheckFunc, ComposeAggergateTestCheckFunc runs _all_ of the +// TestCheckFuncs and aggregates failures. +func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result *multierror.Error + + for i, f := range fs { + if err := f(s); err != nil { + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) + } + } + + return result.ErrorOrNil() + } +} + +// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value +// exists in state for the given name/key combination. It is useful when +// testing that computed values were set, when it is not possible to +// know ahead of time what the values will be. 
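A minimal sketch of a typical acceptance test that ties TestCase, TestStep, ComposeTestCheckFunc, and the attribute checks together; the "example" provider, the testAccProvider variable, and the HCL config are hypothetical and assumed to be defined by the provider under test.

package example_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/terraform"
)

// testAccProvider is assumed to be initialized by the provider under test.
var testAccProvider *schema.Provider

func TestAccExampleWidget_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			// Verify required credentials / environment variables here.
		},
		Providers: map[string]terraform.ResourceProvider{
			"example": testAccProvider,
		},
		Steps: []resource.TestStep{
			{
				Config: `resource "example_widget" "foo" { name = "foo" }`,
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr("example_widget.foo", "name", "foo"),
					resource.TestCheckResourceAttrSet("example_widget.foo", "id"),
				),
			},
		},
	})
}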
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + } + + return nil +} + +// TestCheckResourceAttr is a TestCheckFunc which validates +// the value in state for the given name/key combination. +func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + } +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + } +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". 
+ emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. +func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + } +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
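A minimal sketch showing TestCheckResourceAttrPair (defined below) asserting that a hypothetical data source lookup resolves to the same id as the managed resource it targets; the resource and data source addresses are illustrative.

package example_test

import (
	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// pairCheck can be used as the Check of a TestStep whose config declares both
// the managed resource and the data source that looks it up by name.
var pairCheck = resource.ComposeTestCheckFunc(
	resource.TestCheckResourceAttrPair(
		"data.example_widget.by_name", "id",
		"example_widget.foo", "id",
	),
)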
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. + return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// TestT is the interface used to handle the test lifecycle of a test. 
+// +// Users should just use a *testing.T object, which implements this. +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) + Name() string + Parallel() +} + +// This is set to true by unit tests to alter some behavior +var testTesting = false + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. +func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// operationError is a specialized implementation of error used to describe +// failures during one of the several operations performed for a particular +// test case. +type operationError struct { + OpName string + Diags tfdiags.Diagnostics +} + +func newOperationError(opName string, diags tfdiags.Diagnostics) error { + return operationError{opName, diags} +} + +// Error returns a terse error string containing just the basic diagnostic +// messages, for situations where normal Go error behavior is appropriate. +func (err operationError) Error() string { + return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +} + +// ErrorDetail is like Error except it includes verbosely-rendered diagnostics +// similar to what would come from a normal Terraform run, which include +// additional context not included in Error(). +func (err operationError) ErrorDetail() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "errors during %s:", err.OpName) + clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} + for _, diag := range err.Diags { + diagStr := format.Diagnostic(diag, nil, clr, 78) + buf.WriteByte('\n') + buf.WriteString(diagStr) + } + return buf.String() +} + +// detailedErrorMessage is a helper for calling ErrorDetail on an error if +// it is an operationError or just taking Error otherwise. 
+func detailedErrorMessage(err error) string { + switch tErr := err.(type) { + case operationError: + return tErr.ErrorDetail() + default: + return err.Error() + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go new file mode 100644 index 000000000..e21525de8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_config.go @@ -0,0 +1,404 @@ +package resource + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// testStepConfig runs a config-mode test step +func testStepConfig( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + return testStep(opts, state, step) +} + +func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + if !step.Destroy { + if err := testStepTaint(state, step); err != nil { + return state, err + } + } + + cfg, err := testConfig(opts, step) + if err != nil { + return state, err + } + + var stepDiags tfdiags.Diagnostics + + // Build the context + opts.Config = cfg + opts.State, err = terraform.ShimLegacyState(state) + if err != nil { + return nil, err + } + + opts.Destroy = step.Destroy + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) + } + if stepDiags := ctx.Validate(); len(stepDiags) > 0 { + if stepDiags.HasErrors() { + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) + } + + log.Printf("[WARN] Config warnings:\n%s", stepDiags) + } + + // Refresh! + newState, stepDiags := ctx.Refresh() + // shim the state first so the test can check the state on errors + + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("refresh", stepDiags) + } + + // If this step is a PlanOnly step, skip over this first Plan and subsequent + // Apply, and use the follow up Plan that checks for perpetual diffs + if !step.PlanOnly { + // Plan! + if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("plan", stepDiags) + } else { + log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) + } + + // We need to keep a copy of the state prior to destroying + // such that destroy steps can verify their behavior in the check + // function + stateBeforeApplication := state.DeepCopy() + + // Apply the diff, creating real resources. 
+ newState, stepDiags = ctx.Apply() + // shim the state first so the test can check the state on errors + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("apply", stepDiags) + } + + // Run any configured checks + if step.Check != nil { + if step.Destroy { + if err := step.Check(stateBeforeApplication); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } else { + if err := step.Check(state); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } + } + } + + // Now, verify that Plan is now empty and we don't have a perpetual diff issue + // We do this with TWO plans. One without a refresh. + var p *plans.Plan + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("follow-up plan", stepDiags) + } + if !p.Changes.Empty() { + if step.ExpectNonEmptyPlan { + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } else { + return state, fmt.Errorf( + "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } + } + + // And another after a Refresh. + if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { + newState, stepDiags = ctx.Refresh() + if stepDiags.HasErrors() { + return state, newOperationError("follow-up refresh", stepDiags) + } + + state, err = shimNewState(newState, step.providers) + if err != nil { + return nil, err + } + } + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("second follow-up refresh", stepDiags) + } + empty := p.Changes.Empty() + + // Data resources are tricky because they legitimately get instantiated + // during refresh so that they will be already populated during the + // plan walk. Because of this, if we have any data resources in the + // config we'll end up wanting to destroy them again here. This is + // acceptable and expected, and we'll treat it as "empty" for the + // sake of this testing. + if step.Destroy && !empty { + empty = true + for _, change := range p.Changes.Resources { + if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { + empty = false + break + } + } + } + + if !empty { + if step.ExpectNonEmptyPlan { + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } else { + return state, fmt.Errorf( + "After applying this step and refreshing, "+ + "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) + } + } + + // Made it here, but expected a non-empty plan, fail! + if step.ExpectNonEmptyPlan && empty { + return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") + } + + // Made it here? Good job test step! + return state, nil +} + +// legacyPlanComparisonString produces a string representation of the changes +// from a plan and a given state together, as was formerly produced by the +// String method of terraform.Plan. +// +// This is here only for compatibility with existing tests that predate our +// new plan and state types, and should not be used in new tests. Instead, use +// a library like "cmp" to do a deep equality and diff on the two +// data structures.
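+//
+// As a hedged illustration of that suggestion (the go-cmp import and the
+// wantState/gotState variables are placeholders, not part of this package):
+//
+//    if diff := cmp.Diff(wantState, gotState); diff != "" {
+//        t.Errorf("unexpected result (-want +got):\n%s", diff)
+//    }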
+func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of terraform.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + requiresReplace := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. + continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + + rr := []string{} + for _, p := range rc.RequiredReplace.List() { + rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) + } + requiresReplace[resourceKey] = rr + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + forceNewAttrs := requiresReplace[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. 
+ crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == hcl2shim.UnknownVariableValue { + v = "" + } + // NOTE: we don't support here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + + // This may not be as precise as in the old diff, as it matches + // everything under the attribute that was originally marked as + // ForceNew, but should help make it easier to determine what + // caused replacement here. 
+ for _, k := range forceNewAttrs { + if strings.HasPrefix(attrK, k) { + updateMsg = " (forces new resource)" + break + } + } + + fmt.Fprintf( + &mBuf, " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, v, + updateMsg, + ) + } + } + + if moduleKey == "" { // root module + buf.Write(mBuf.Bytes()) + buf.WriteByte('\n') + continue + } + + fmt.Fprintf(&buf, "%s:\n", moduleKey) + s := bufio.NewScanner(&mBuf) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return buf.String() +} + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go new file mode 100644 index 000000000..561873dea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/testing_import_state.go @@ -0,0 +1,233 @@ +package resource + +import ( + "fmt" + "log" + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// testStepImportState runs an import state test step +func testStepImportState( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + + // Determine the ID to import + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + return state, err + } + case step.ImportStateId != "": + importId = step.ImportStateId + default: + resource, err := testResource(step, state) + if err != nil { + return state, err + } + importId = resource.Primary.ID + } + + importPrefix := step.ImportStateIdPrefix + if importPrefix != "" { + importId = fmt.Sprintf("%s%s", importPrefix, importId) + } + + // Setup the context. We initialize with an empty state. We use the + // full config for provider configurations. + cfg, err := testConfig(opts, step) + if err != nil { + return state, err + } + + opts.Config = cfg + + // import tests start with empty state + opts.State = states.NewState() + + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, stepDiags.Err() + } + + // The test step provides the resource address as a string, so we need + // to parse it to get an addrs.AbsResourceAddress to pass in to the + // import method.
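+ // For illustration: a ResourceName such as "example_thing.foo" (a
+ // hypothetical address) is parsed below, first into an HCL traversal and
+ // then into the absolute resource instance address that ctx.Import
+ // expects as its target, paired with the import ID computed above.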
+ traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) + if hclDiags.HasErrors() { + return nil, hclDiags + } + importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) + if stepDiags.HasErrors() { + return nil, stepDiags.Err() + } + + // Do the import + importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ + // Set the module so that any provider config is loaded + Config: cfg, + + Targets: []*terraform.ImportTarget{ + &terraform.ImportTarget{ + Addr: importAddr, + ID: importId, + }, + }, + }) + if stepDiags.HasErrors() { + log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) + return state, stepDiags.Err() + } + + newState, err := shimNewState(importedState, step.providers) + if err != nil { + return nil, err + } + + // Go through the new state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range newState.RootModule().Resources { + if r.Primary != nil { + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + } + if err := step.ImportStateCheck(states); err != nil { + return state, err + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := newState.RootModule().Resources + old := state.RootModule().Resources + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range old { + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { + oldR = r2 + break + } + } + if oldR == nil { + return state, fmt.Errorf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // We'll try our best to find the schema for this resource type + // so we can ignore Removed fields during validation. If we fail + // to find the schema then we won't ignore them and so the test + // will need to rely on explicit ImportStateVerifyIgnore, though + // this shouldn't happen in any reasonable case. + var rsrcSchema *schema.Resource + if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { + providerType := providerAddr.ProviderConfig.Type + if provider, ok := step.providers[providerType]; ok { + if provider, ok := provider.(*schema.Provider); ok { + rsrcSchema = provider.ResourcesMap[r.Type] + } + } + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + // Also remove any attributes that are marked as "Removed" in the + // schema, if we have a schema to check that against. 
+ if rsrcSchema != nil { + for k := range actual { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(actual, k) + break + } + } + } + for k := range expected { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(expected, k) + break + } + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return state, fmt.Errorf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + // Return the old state (non-imported) so we don't change anything. + return state, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go new file mode 100644 index 000000000..e56a5155d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/resource/wait.go @@ -0,0 +1,84 @@ +package resource + +import ( + "sync" + "time" +) + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +func Retry(timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForState() + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. +func RetryableError(err error) *RetryError { + if err == nil { + return nil + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. 
+func NonRetryableError(err error) *RetryError { + if err == nil { + return nil + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md new file mode 100644 index 000000000..28c83628e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/README.md @@ -0,0 +1,11 @@ +# Terraform Helper Lib: schema + +The `schema` package provides a high-level interface for writing resource +providers for Terraform. + +If you're writing a resource provider, we recommend you use this package. + +The interface exposed by this package is much friendlier than trying to +write to the Terraform API directly. The core Terraform API is low-level +and built for maximum flexibility and control, whereas this library is built +as a framework around that to more easily write common providers. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go new file mode 100644 index 000000000..609c208b3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/backend.go @@ -0,0 +1,200 @@ +package schema + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + ctyconvert "github.com/zclconf/go-cty/cty/convert" +) + +// Backend represents a partial backend.Backend implementation and simplifies +// the creation of configuration loading and validation. +// +// Unlike other schema structs such as Provider, this struct is meant to be +// embedded within your actual implementation. It provides implementations +// only for Input and Configure and gives you a method for accessing the +// configuration in the form of a ResourceData that you're expected to call +// from the other implementation funcs. +type Backend struct { + // Schema is the schema for the configuration of this backend. If this + // Backend has no configuration this can be omitted. + Schema map[string]*Schema + + // ConfigureFunc is called to configure the backend. Use the + // FromContext* methods to extract information from the context. + // This can be nil, in which case nothing will be called but the + // config will still be stored. + ConfigureFunc func(context.Context) error + + config *ResourceData +} + +var ( + backendConfigKey = contextKey("backend config") +) + +// FromContextBackendConfig extracts a ResourceData with the configuration +// from the context. This should only be called by Backend functions. +func FromContextBackendConfig(ctx context.Context) *ResourceData { + return ctx.Value(backendConfigKey).(*ResourceData) +} + +func (b *Backend) ConfigSchema() *configschema.Block { + // This is an alias of CoreConfigSchema just to implement the + // backend.Backend interface. + return b.CoreConfigSchema() +} + +func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { + if b == nil { + return configVal, nil + } + var diags tfdiags.Diagnostics + var err error + + // In order to use Transform below, this needs to be filled out completely + // according the schema. 
+ configVal, err = b.CoreConfigSchema().CoerceValue(configVal) + if err != nil { + return configVal, diags.Append(err) + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. + configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags + } + + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags +} + +func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { + if b == nil { + return nil + } + + var diags tfdiags.Diagnostics + sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, shimRC, nil, nil, true) + if err != nil { + diags = diags.Append(err) + return diags + } + + data, err := sm.Data(nil, diff) + if err != nil { + diags = diags.Append(err) + return diags + } + b.config = data + + if b.ConfigureFunc != nil { + err = b.ConfigureFunc(context.WithValue( + context.Background(), backendConfigKey, data)) + if err != nil { + diags = diags.Append(err) + return diags + } + } + + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *terraform.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. 
+ shimMap = map[string]interface{}{} + } + return &terraform.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } +} + +// Config returns the configuration. This is available after Configure is +// called. +func (b *Backend) Config() *ResourceData { + return b.config +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go new file mode 100644 index 000000000..fa03d8338 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/core_schema.go @@ -0,0 +1,309 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. +// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case SchemaConfigModeBlock: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. 
+ ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema, ValueType: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. This is appropriate only for primitives or collections whose +// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections +// whose elem is a whole resource. +func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { + // The Schema.DefaultFunc capability adds some extra weirdness here since + // it can be combined with "Required: true" to create a situation where + // required-ness is conditional. Terraform Core doesn't share this concept, + // so we must sniff for this possibility here and conditionally turn + // off the "Required" flag if it looks like the DefaultFunc is going + // to provide a value. + // This is not 100% true to the original interface of DefaultFunc but + // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc + // situations, which are the main cases we care about. + // + // Note that this also has a consequence for commands that return schema + // information for documentation purposes: running those for certain + // providers will produce different results depending on which environment + // variables are set. We accept that weirdness in order to keep this + // interface to core otherwise simple. + reqd := s.Required + opt := s.Optional + if reqd && s.DefaultFunc != nil { + v, err := s.DefaultFunc() + // We can't report errors from here, so we'll instead just force + // "Required" to false and let the provider try calling its + // DefaultFunc again during the validate step, where it can then + // return the error. + if err != nil || (err == nil && v != nil) { + reqd = false + opt = true + } + } + + return &configschema.Attribute{ + Type: s.coreConfigSchemaType(), + Optional: opt, + Required: reqd, + Computed: s.Computed, + Sensitive: s.Sensitive, + Description: s.Description, + } +} + +// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of +// a schema. This is appropriate only for collections whose Elem is an instance +// of Resource, and will panic otherwise. +func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { + ret := &configschema.NestedBlock{} + if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil { + ret.Block = *nested + } + switch s.Type { + case TypeList: + ret.Nesting = configschema.NestingList + case TypeSet: + ret.Nesting = configschema.NestingSet + case TypeMap: + ret.Nesting = configschema.NestingMap + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type)) + } + + ret.MinItems = s.MinItems + ret.MaxItems = s.MaxItems + + if s.Required && s.MinItems == 0 { + // configschema doesn't have a "required" representation for nested + // blocks, but we can fake it by requiring at least one item.
+ ret.MinItems = 1 + } + if s.Optional && s.MinItems > 0 { + // Historically helper/schema would ignore MinItems if Optional were + // set, so we must mimic this behavior here to ensure that providers + // relying on that undocumented behavior can continue to operate as + // they did before. + ret.MinItems = 0 + } + if s.Computed && !s.Optional { + // MinItems/MaxItems are meaningless for computed nested blocks, since + // they are never set by the user anyway. This ensures that we'll never + // generate weird errors about them. + ret.MinItems = 0 + ret.MaxItems = 0 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. +func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() + case *Resource: + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. +func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. 
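+ // As a rough illustration, when the resource declares Create and Delete
+ // timeouts the nested block added below lets users write configuration
+ // like the following (durations are examples only):
+ //
+ //    timeouts {
+ //        create = "30m"
+ //        delete = "2h"
+ //    }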
+ if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + + return block +} + +func (r *Resource) coreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the backends's schema. +func (r *Backend) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go new file mode 100644 index 000000000..8d93750ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/data_source_resource_shim.go @@ -0,0 +1,59 @@ +package schema + +import ( + "fmt" +) + +// DataSourceResourceShim takes a Resource instance describing a data source +// (with a Read implementation and a Schema, at least) and returns a new +// Resource instance with additional Create and Delete implementations that +// allow the data source to be used as a resource. +// +// This is a backward-compatibility layer for data sources that were formerly +// read-only resources before the data source concept was added. It should not +// be used for any *new* data sources. +// +// The Read function for the data source *must* call d.SetId with a non-empty +// id in order for this shim to function as expected. +// +// The provided Resource instance, and its schema, will be modified in-place +// to make it suitable for use as a full resource. +func DataSourceResourceShim(name string, dataSource *Resource) *Resource { + // Recursively, in-place adjust the schema so that it has ForceNew + // on any user-settable resource. + dataSourceResourceShimAdjustSchema(dataSource.Schema) + + dataSource.Create = CreateFunc(dataSource.Read) + dataSource.Delete = func(d *ResourceData, meta interface{}) error { + d.SetId("") + return nil + } + dataSource.Update = nil // should already be nil, but let's make sure + + // FIXME: Link to some further docs either on the website or in the + // changelog, once such a thing exists. + dataSource.DeprecationMessage = fmt.Sprintf( + "using %s as a resource is deprecated; consider using the data source instead", + name, + ) + + return dataSource +} + +func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { + for _, s := range schema { + // If the attribute is configurable then it must be ForceNew, + // since we have no Update implementation. 
+ if s.Required || s.Optional { + s.ForceNew = true + } + + // If the attribute is a nested resource, we need to recursively + // apply these same adjustments to it. + if s.Elem != nil { + if r, ok := s.Elem.(*Resource); ok { + dataSourceResourceShimAdjustSchema(r.Schema) + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go new file mode 100644 index 000000000..d5e20e038 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/equal.go @@ -0,0 +1,6 @@ +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go new file mode 100644 index 000000000..2a66a068f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader.go @@ -0,0 +1,343 @@ +package schema + +import ( + "fmt" + "strconv" + "strings" +) + +// FieldReaders are responsible for decoding fields out of data into +// the proper typed representation. ResourceData uses this to query data +// out of multiple sources: config, state, diffs, etc. +type FieldReader interface { + ReadField([]string) (FieldReadResult, error) +} + +// FieldReadResult encapsulates all the resulting data from reading +// a field. +type FieldReadResult struct { + // Value is the actual read value. NegValue is the _negative_ value + // or the items that should be removed (if they existed). NegValue + // doesn't make sense for primitives but is important for any + // container types such as maps, sets, lists. + Value interface{} + ValueProcessed interface{} + + // Exists is true if the field was found in the data. False means + // it wasn't found if there was no error. + Exists bool + + // Computed is true if the field was found but the value + // is computed. + Computed bool +} + +// ValueOrZero returns the value of this result or the zero value of the +// schema type, ensuring a consistent non-nil return value. +func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { + if r.Value != nil { + return r.Value + } + + return s.ZeroValue() +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through. +func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { + parts := strings.Split(path, ".") + return addrToSchema(parts, schemaMap) +} + +// addrToSchema finds the final element schema for the given address +// and the given schema. It returns all the schemas that led to the final +// schema. These are in order of the address (out to in). +func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { + current := &Schema{ + Type: typeObject, + Elem: schemaMap, + } + + // If we aren't given an address, then the user is requesting the + // full object, so we return the special value which is the full object. + if len(addr) == 0 { + return []*Schema{current} + } + + result := make([]*Schema, 0, len(addr)) + for len(addr) > 0 { + k := addr[0] + addr = addr[1:] + + REPEAT: + // We want to trim off the first "typeObject" since its not a + // real lookup that people do. i.e. []string{"foo"} in a structure + // isn't {typeObject, typeString}, its just a {typeString}. 
+ if len(result) > 0 || current.Type != typeObject { + result = append(result, current) + } + + switch t := current.Type; t { + case TypeBool, TypeInt, TypeFloat, TypeString: + if len(addr) > 0 { + return nil + } + case TypeList, TypeSet: + isIndex := len(addr) > 0 && addr[0] == "#" + + switch v := current.Elem.(type) { + case *Resource: + current = &Schema{ + Type: typeObject, + Elem: v.Schema, + } + case *Schema: + current = v + case ValueType: + current = &Schema{Type: v} + default: + // we may not know the Elem type and are just looking for the + // index + if isIndex { + break + } + + if len(addr) == 0 { + // we've processed the address, so return what we've + // collected + return result + } + + if len(addr) == 1 { + if _, err := strconv.Atoi(addr[0]); err == nil { + // we're indexing a value without a schema. This can + // happen if the list is nested in another schema type. + // Default to a TypeString like we do with a map + current = &Schema{Type: TypeString} + break + } + } + + return nil + } + + // If we only have one more thing and the next thing + // is a #, then we're accessing the index which is always + // an int. + if isIndex { + current = &Schema{Type: TypeInt} + break + } + + case TypeMap: + if len(addr) > 0 { + switch v := current.Elem.(type) { + case ValueType: + current = &Schema{Type: v} + case *Schema: + current, _ = current.Elem.(*Schema) + default: + // maps default to string values. This is all we can have + // if this is nested in another list or map. + current = &Schema{Type: TypeString} + } + } + case typeObject: + // If we're already in the object, then we want to handle Sets + // and Lists specially. Basically, their next key is the lookup + // key (the set value or the list element). For these scenarios, + // we just want to skip it and move to the next element if there + // is one. + if len(result) > 0 { + lastType := result[len(result)-2].Type + if lastType == TypeSet || lastType == TypeList { + if len(addr) == 0 { + break + } + + k = addr[0] + addr = addr[1:] + } + } + + m := current.Elem.(map[string]*Schema) + val, ok := m[k] + if !ok { + return nil + } + + current = val + goto REPEAT + } + } + + return result +} + +// readListField is a generic method for reading a list field out of a +// a FieldReader. It does this based on the assumption that there is a key +// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. +// after that point. 
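+//
+// For example, a two-element list "foo" is expected to appear in the flatmap
+// data roughly as (values illustrative):
+//
+//    foo.# = "2"
+//    foo.0 = "first"
+//    foo.1 = "second"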
+func readListField( + r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + addrPadded := make([]string, len(addr)+1) + copy(addrPadded, addr) + addrPadded[len(addrPadded)-1] = "#" + + // Get the number of elements in the list + countResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !countResult.Exists { + // No count, means we have no list + countResult.Value = 0 + } + + // If we have an empty list, then return an empty list + if countResult.Computed || countResult.Value.(int) == 0 { + return FieldReadResult{ + Value: []interface{}{}, + Exists: countResult.Exists, + Computed: countResult.Computed, + }, nil + } + + // Go through each count, and get the item value out of it + result := make([]interface{}, countResult.Value.(int)) + for i, _ := range result { + is := strconv.FormatInt(int64(i), 10) + addrPadded[len(addrPadded)-1] = is + rawResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !rawResult.Exists { + // This should never happen, because by the time the data + // gets to the FieldReaders, all the defaults should be set by + // Schema. + rawResult.Value = nil + } + + result[i] = rawResult.Value + } + + return FieldReadResult{ + Value: result, + Exists: true, + }, nil +} + +// readObjectField is a generic method for reading objects out of FieldReaders +// based on the assumption that building an address of []string{k, FIELD} +// will result in the proper field data. +func readObjectField( + r FieldReader, + addr []string, + schema map[string]*Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + exists := false + for field, s := range schema { + addrRead := make([]string, len(addr), len(addr)+1) + copy(addrRead, addr) + addrRead = append(addrRead, field) + rawResult, err := r.ReadField(addrRead) + if err != nil { + return FieldReadResult{}, err + } + if rawResult.Exists { + exists = true + } + + result[field] = rawResult.ValueOrZero(s) + } + + return FieldReadResult{ + Value: result, + Exists: exists, + }, nil +} + +// convert map values to the proper primitive type based on schema.Elem +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err + } + + switch elemType { + case TypeInt, TypeFloat, TypeBool: + for k, v := range m { + vs, ok := v.(string) + if !ok { + continue + } + + v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) + if err != nil { + return err + } + + m[k] = v + } + } + return nil +} + +func stringToPrimitive( + value string, computed bool, schema *Schema) (interface{}, error) { + var returnVal interface{} + switch schema.Type { + case TypeBool: + if value == "" { + returnVal = false + break + } + if computed { + break + } + + v, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + + returnVal = v + case TypeFloat: + if value == "" { + returnVal = 0.0 + break + } + if computed { + break + } + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, err + } + + returnVal = v + case TypeInt: + if value == "" { + returnVal = 0 + break + } + if computed { + break + } + + v, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return nil, err + } + + returnVal = int(v) + case TypeString: + returnVal = value + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } + + return returnVal, nil +} diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go new file mode 100644 index 000000000..dc2ae1af5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_config.go @@ -0,0 +1,353 @@ +package schema + +import ( + "fmt" + "log" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/mapstructure" +) + +// ConfigFieldReader reads fields out of an untyped map[string]string to the +// best of its ability. It also applies defaults from the Schema. (The other +// field readers do not need default handling because they source fully +// populated data structures.) +type ConfigFieldReader struct { + Config *terraform.ResourceConfig + Schema map[string]*Schema + + indexMaps map[string]map[string]int + once sync.Once +} + +func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { + r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) + return r.readField(address, false) +} + +func (r *ConfigFieldReader) readField( + address []string, nested bool) (FieldReadResult, error) { + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + if !nested { + // If we have a set anywhere in the address, then we need to + // read that set out in order and actually replace that part of + // the address with the real list index. i.e. set.50 might actually + // map to set.12 in the config, since it is in list order in the + // config, not indexed by set value. + for i, v := range schemaList { + // Sets are the only thing that cause this issue. + if v.Type != TypeSet { + continue + } + + // If we're at the end of the list, then we don't have to worry + // about this because we're just requesting the whole set. + if i == len(schemaList)-1 { + continue + } + + // If we're looking for the count, then ignore... + if address[i+1] == "#" { + continue + } + + indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] + if !ok { + // Get the set so we can get the index map that tells us the + // mapping of the hash code to the list index + _, err := r.readSet(address[:i+1], v) + if err != nil { + return FieldReadResult{}, err + } + indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] + } + + index, ok := indexMap[address[i+1]] + if !ok { + return FieldReadResult{}, nil + } + + address[i+1] = strconv.FormatInt(int64(index), 10) + } + } + + k := strings.Join(address, ".") + schema := schemaList[len(schemaList)-1] + + // If we're getting the single element of a promoted list, then + // check to see if we have a single element we need to promote. + if address[len(address)-1] == "0" && len(schemaList) > 1 { + lastSchema := schemaList[len(schemaList)-2] + if lastSchema.Type == TypeList && lastSchema.PromoteSingle { + k := strings.Join(address[:len(address)-1], ".") + result, err := r.readPrimitive(k, schema) + if err == nil { + return result, nil + } + } + } + + if protoVersion5 { + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. 
This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil + } + } + } + } + + switch schema.Type { + case TypeBool, TypeFloat, TypeInt, TypeString: + return r.readPrimitive(k, schema) + case TypeList: + // If we support promotion then we first check if we have a lone + // value that we must promote. + // a value that is alone. + if schema.PromoteSingle { + result, err := r.readPrimitive(k, schema.Elem.(*Schema)) + if err == nil && result.Exists { + result.Value = []interface{}{result.Value} + return result, nil + } + } + + return readListField(&nestedConfigFieldReader{r}, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField( + &nestedConfigFieldReader{r}, + address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + // We want both the raw value and the interpolated. We use the interpolated + // to store actual values and we use the raw one to check for + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. + mraw, ok := r.Config.GetRaw(k) + if !ok { + // check if this is from an interpolated field by seeing if it exists + // in the config + _, ok := r.Config.Get(k) + if !ok { + // this really doesn't exist + return FieldReadResult{}, nil + } + + // We couldn't fetch the value from a nested data structure, so treat the + // raw value as an interpolation string. The mraw value is only used + // for the type switch below. + mraw = "${INTERPOLATED}" + } + + result := make(map[string]interface{}) + computed := false + switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. 
+ for i, iv := range mapV { + result[i] = iv + } + case []interface{}: + for i, innerRaw := range m { + for ik := range innerRaw.(map[string]interface{}) { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case []map[string]interface{}: + for i, innerRaw := range m { + for ik := range innerRaw { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case map[string]interface{}: + for ik := range m { + key := fmt.Sprintf("%s.%s", k, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + case nil: + // the map may have been empty on the configuration, so we leave the + // empty result + default: + panic(fmt.Sprintf("unknown type: %#v", mraw)) + } + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var value interface{} + if !computed { + value = result + } + + return FieldReadResult{ + Value: value, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readPrimitive( + k string, schema *Schema) (FieldReadResult, error) { + raw, ok := r.Config.Get(k) + if !ok { + // Nothing in config, but we might still have a default from the schema + var err error + raw, err = schema.DefaultValue() + if err != nil { + return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err) + } + + if raw == nil { + return FieldReadResult{}, nil + } + } + + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return FieldReadResult{}, err + } + + computed := r.Config.IsComputed(k) + returnVal, err := stringToPrimitive(result, computed, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + indexMap := make(map[string]int) + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + return FieldReadResult{Value: set}, nil + } + + // If the list is computed, the set is necessarilly computed + if raw.Computed { + return FieldReadResult{ + Value: set, + Exists: true, + Computed: raw.Computed, + }, nil + } + + // Build up the set from the list elements + for i, v := range raw.Value.([]interface{}) { + // Check if any of the keys in this item are computed + computed := r.hasComputedSubKeys( + fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema) + + code := set.add(v, computed) + indexMap[code] = i + } + + r.indexMaps[strings.Join(address, ".")] = indexMap + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// hasComputedSubKeys walks through a schema and returns whether or not the +// given key contains any subkeys that are computed. +func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool { + prefix := key + "." 
+ + switch t := schema.Elem.(type) { + case *Resource: + for k, schema := range t.Schema { + if r.Config.IsComputed(prefix + k) { + return true + } + + if r.hasComputedSubKeys(prefix+k, schema) { + return true + } + } + } + + return false +} + +// nestedConfigFieldReader is a funny little thing that just wraps a +// ConfigFieldReader to call readField when ReadField is called so that +// we don't recalculate the set rewrites in the address, which leads to +// an infinite loop. +type nestedConfigFieldReader struct { + Reader *ConfigFieldReader +} + +func (r *nestedConfigFieldReader) ReadField( + address []string) (FieldReadResult, error) { + return r.Reader.readField(address, true) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go new file mode 100644 index 000000000..c099029af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_diff.go @@ -0,0 +1,244 @@ +package schema + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/mapstructure" +) + +// DiffFieldReader reads fields out of a diff structures. +// +// It also requires access to a Reader that reads fields from the structure +// that the diff was derived from. This is usually the state. This is required +// because a diff on its own doesn't have complete data about full objects +// such as maps. +// +// The Source MUST be the data that the diff was derived from. If it isn't, +// the behavior of this struct is undefined. +// +// Reading fields from a DiffFieldReader is identical to reading from +// Source except the diff will be applied to the end result. +// +// The "Exists" field on the result will be set to true if the complete +// field exists whether its from the source, diff, or a combination of both. +// It cannot be determined whether a retrieved value is composed of +// diff elements. +type DiffFieldReader struct { + Diff *terraform.InstanceDiff + Source FieldReader + Schema map[string]*Schema + + // cache for memoizing ReadField calls. + cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error +} + +func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. 
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} + return FieldReadResult{}, nil + } + + var res FieldReadResult + var err error + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + res, err = r.readPrimitive(address, schema) + case TypeList: + res, err = readListField(r, address, schema) + case TypeMap: + res, err = r.readMap(address, schema) + case TypeSet: + res, err = r.readSet(address, schema) + case typeObject: + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err +} + +func (r *DiffFieldReader) readMap( + address []string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // First read the map from the underlying source + source, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if source.Exists { + // readMap may return a nil value, or an unknown value placeholder in + // some cases, causing the type assertion to panic if we don't assign the ok value + result, _ = source.Value.(map[string]interface{}) + resultSet = true + } + + // Next, read all the elements we have in our diff, and apply + // the diff to our result. + prefix := strings.Join(address, ".") + "." + for k, v := range r.Diff.Attributes { + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasPrefix(k, prefix+"%") { + // Ignore the count field + continue + } + + resultSet = true + + k = k[len(prefix):] + if v.NewRemoved { + delete(result, k) + continue + } + + result[k] = v.New + } + + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *DiffFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + + attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] + if !ok { + return result, nil + } + + var resultVal string + if !attrD.NewComputed { + resultVal = attrD.New + if attrD.NewExtra != nil { + result.ValueProcessed = resultVal + if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { + return FieldReadResult{}, err + } + } + } + + result.Computed = attrD.NewComputed + result.Exists = true + result.Value, err = stringToPrimitive(resultVal, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return result, nil +} + +func (r *DiffFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + prefix := strings.Join(address, ".") + "." 
+ + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // Go through the map and find all the set items + for k, d := range r.Diff.Attributes { + if d.NewRemoved { + // If the field is removed, we always ignore it + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasSuffix(k, "#") { + // Ignore any count field + continue + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + raw, err := r.ReadField(append(address, idx)) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + } + + // Determine if the set "exists". It exists if there are items or if + // the diff explicitly wanted it empty. + exists := set.Len() > 0 + if !exists { + // We could check if the diff value is "0" here but I think the + // existence of "#" on its own is enough to show it existed. This + // protects us in the future from the zero value changing from + // "0" to "" breaking us (if that were to happen). + if _, ok := r.Diff.Attributes[prefix+"#"]; ok { + exists = true + } + } + + if !exists { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if result.Exists { + return result, nil + } + } + + return FieldReadResult{ + Value: set, + Exists: exists, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go new file mode 100644 index 000000000..53f73b71b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_map.go @@ -0,0 +1,235 @@ +package schema + +import ( + "fmt" + "strings" +) + +// MapFieldReader reads fields out of an untyped map[string]string to +// the best of its ability. +type MapFieldReader struct { + Map MapReader + Schema map[string]*Schema +} + +func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { + k := strings.Join(address, ".") + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return r.readPrimitive(address, schema) + case TypeList: + return readListField(r, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // If the name of the map field is directly in the map with an + // empty string, it means that the map is being deleted, so mark + // that is is set. + if v, ok := r.Map.Access(k); ok && v == "" { + resultSet = true + } + + prefix := k + "." 
+ r.Map.Range(func(k, v string) bool { + if strings.HasPrefix(k, prefix) { + resultSet = true + + key := k[len(prefix):] + if key != "%" && key != "#" { + result[key] = v + } + } + + return true + }) + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *MapFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + k := strings.Join(address, ".") + result, ok := r.Map.Access(k) + if !ok { + return FieldReadResult{}, nil + } + + returnVal, err := stringToPrimitive(result, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + }, nil +} + +func (r *MapFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + // Get the number of elements in the list + countRaw, err := r.readPrimitive( + append(address, "#"), &Schema{Type: TypeInt}) + if err != nil { + return FieldReadResult{}, err + } + if !countRaw.Exists { + // No count, means we have no list + countRaw.Value = 0 + } + + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // If we have an empty list, then return an empty list + if countRaw.Computed || countRaw.Value.(int) == 0 { + return FieldReadResult{ + Value: set, + Exists: countRaw.Exists, + Computed: countRaw.Computed, + }, nil + } + + // Go through the map and find all the set items + prefix := strings.Join(address, ".") + "." + countExpected := countRaw.Value.(int) + countActual := make(map[string]struct{}) + completed := r.Map.Range(func(k, _ string) bool { + if !strings.HasPrefix(k, prefix) { + return true + } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + return true + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + var raw FieldReadResult + raw, err = r.ReadField(append(address, idx)) + if err != nil { + return false + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + + // Due to the way multimap readers work, if we've seen the number + // of fields we expect, then exit so that we don't read later values. + // For example: the "set" map might have "ports.#", "ports.0", and + // "ports.1", but the "state" map might have those plus "ports.2". + // We don't want "ports.2" + countActual[idx] = struct{}{} + if len(countActual) >= countExpected { + return false + } + + return true + }) + if !completed && err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// MapReader is an interface that is given to MapFieldReader for accessing +// a "map". This can be used to have alternate implementations. For a basic +// map[string]string, use BasicMapReader. +type MapReader interface { + Access(string) (string, bool) + Range(func(string, string) bool) bool +} + +// BasicMapReader implements MapReader for a single map. 
+type BasicMapReader map[string]string + +func (r BasicMapReader) Access(k string) (string, bool) { + v, ok := r[k] + return v, ok +} + +func (r BasicMapReader) Range(f func(string, string) bool) bool { + for k, v := range r { + if cont := f(k, v); !cont { + return false + } + } + + return true +} + +// MultiMapReader reads over multiple maps, preferring keys that are +// founder earlier (lower number index) vs. later (higher number index) +type MultiMapReader []map[string]string + +func (r MultiMapReader) Access(k string) (string, bool) { + for _, m := range r { + if v, ok := m[k]; ok { + return v, ok + } + } + + return "", false +} + +func (r MultiMapReader) Range(f func(string, string) bool) bool { + done := make(map[string]struct{}) + for _, m := range r { + for k, v := range m { + if _, ok := done[k]; ok { + continue + } + + if cont := f(k, v); !cont { + return false + } + + done[k] = struct{}{} + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go new file mode 100644 index 000000000..89ad3a86f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_reader_multi.go @@ -0,0 +1,63 @@ +package schema + +import ( + "fmt" +) + +// MultiLevelFieldReader reads from other field readers, +// merging their results along the way in a specific order. You can specify +// "levels" and name them in order to read only an exact level or up to +// a specific level. +// +// This is useful for saying things such as "read the field from the state +// and config and merge them" or "read the latest value of the field". +type MultiLevelFieldReader struct { + Readers map[string]FieldReader + Levels []string +} + +func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { + return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) +} + +func (r *MultiLevelFieldReader) ReadFieldExact( + address []string, level string) (FieldReadResult, error) { + reader, ok := r.Readers[level] + if !ok { + return FieldReadResult{}, fmt.Errorf( + "Unknown reader level: %s", level) + } + + result, err := reader.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", level, err) + } + + return result, nil +} + +func (r *MultiLevelFieldReader) ReadFieldMerge( + address []string, level string) (FieldReadResult, error) { + var result FieldReadResult + for _, l := range r.Levels { + if r, ok := r.Readers[l]; ok { + out, err := r.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", l, err) + } + + // TODO: computed + if out.Exists { + result = out + } + } + + if l == level { + break + } + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go new file mode 100644 index 000000000..9abc41b54 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer.go @@ -0,0 +1,8 @@ +package schema + +// FieldWriters are responsible for writing fields by address into +// a proper typed representation. ResourceData uses this to write new data +// into existing sources. 
+type FieldWriter interface { + WriteField([]string, interface{}) error +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go new file mode 100644 index 000000000..c09358b1b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/field_writer_map.go @@ -0,0 +1,357 @@ +package schema + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" +) + +// MapFieldWriter writes data into a single map[string]string structure. +type MapFieldWriter struct { + Schema map[string]*Schema + + lock sync.Mutex + result map[string]string +} + +// Map returns the underlying map that is being written to. +func (w *MapFieldWriter) Map() map[string]string { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + return w.result +} + +func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + w.result[addr] = value +} + +// clearTree clears a field and any sub-fields of the given address out of the +// map. This should be used to reset some kind of complex structures (namely +// sets) before writing to make sure that any conflicting data is removed (for +// example, if the set was previously written to the writer's layer). +func (w *MapFieldWriter) clearTree(addr []string) { + prefix := strings.Join(addr, ".") + "." + for k := range w.result { + if strings.HasPrefix(k, prefix) { + delete(w.result, k) + } + } +} + +func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + // If we're setting anything other than a list root or set root, + // then disallow it. 
+ for _, schema := range schemaList[:len(schemaList)-1] { + if schema.Type == TypeList { + return fmt.Errorf( + "%s: can only set full list", + strings.Join(addr, ".")) + } + + if schema.Type == TypeMap { + return fmt.Errorf( + "%s: can only set full map", + strings.Join(addr, ".")) + } + + if schema.Type == TypeSet { + return fmt.Errorf( + "%s: can only set full set", + strings.Join(addr, ".")) + } + } + + return w.set(addr, value) +} + +func (w *MapFieldWriter) set(addr []string, value interface{}) error { + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return w.setPrimitive(addr, value, schema) + case TypeList: + return w.setList(addr, value, schema) + case TypeMap: + return w.setMap(addr, value, schema) + case TypeSet: + return w.setSet(addr, value, schema) + case typeObject: + return w.setObject(addr, value, schema) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } +} + +func (w *MapFieldWriter) setList( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + setElement := func(idx string, value interface{}) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + return w.set(append(addrCopy, idx), value) + } + + var vs []interface{} + if err := mapstructure.Decode(v, &vs); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Wipe the set from the current writer prior to writing if it exists. + // Multiple writes to the same layer is a lot safer for lists than sets due + // to the fact that indexes are always deterministic and the length will + // always be updated with the current length on the last write, but making + // sure we have a clean namespace removes any chance for edge cases to pop up + // and ensures that the last write to the set is the correct value. + w.clearTree(addr) + + // Set the entire list. + var err error + for i, elem := range vs { + is := strconv.FormatInt(int64(i), 10) + err = setElement(is, elem) + if err != nil { + break + } + } + if err != nil { + for i, _ := range vs { + is := strconv.FormatInt(int64(i), 10) + setElement(is, nil) + } + + return err + } + + w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10) + return nil +} + +func (w *MapFieldWriter) setMap( + addr []string, + value interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + v := reflect.ValueOf(value) + vs := make(map[string]interface{}) + + if value == nil { + // The empty string here means the map is removed. + w.result[k] = "" + return nil + } + + if v.Kind() != reflect.Map { + return fmt.Errorf("%s: must be a map", k) + } + if v.Type().Key().Kind() != reflect.String { + return fmt.Errorf("%s: keys must strings", k) + } + for _, mk := range v.MapKeys() { + mv := v.MapIndex(mk) + vs[mk.String()] = mv.Interface() + } + + // Wipe this address tree. The contents of the map should always reflect the + // last write made to it. 
+ w.clearTree(addr) + + // Remove the pure key since we're setting the full map value + delete(w.result, k) + + // Set each subkey + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + for subKey, v := range vs { + if err := w.set(append(addrCopy, subKey), v); err != nil { + return err + } + } + + // Set the count + w.result[k+".%"] = strconv.Itoa(len(vs)) + + return nil +} + +func (w *MapFieldWriter) setObject( + addr []string, + value interface{}, + schema *Schema) error { + // Set the entire object. First decode into a proper structure + var v map[string]interface{} + if err := mapstructure.Decode(value, &v); err != nil { + return fmt.Errorf("%s: %s", strings.Join(addr, "."), err) + } + + // Make space for additional elements in the address + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + + // Set each element in turn + var err error + for k1, v1 := range v { + if err = w.set(append(addrCopy, k1), v1); err != nil { + break + } + } + if err != nil { + for k1, _ := range v { + w.set(append(addrCopy, k1), nil) + } + } + + return err +} + +func (w *MapFieldWriter) setPrimitive( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + + if v == nil { + // The empty string here means the value is removed. + w.result[k] = "" + return nil + } + + var set string + switch schema.Type { + case TypeBool: + var b bool + if err := mapstructure.Decode(v, &b); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + set = strconv.FormatBool(b) + case TypeString: + if err := mapstructure.Decode(v, &set); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + case TypeInt: + var n int + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatInt(int64(n), 10) + case TypeFloat: + var n float64 + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatFloat(float64(n), 'G', -1, 64) + default: + return fmt.Errorf("Unknown type: %#v", schema.Type) + } + + w.result[k] = set + return nil +} + +func (w *MapFieldWriter) setSet( + addr []string, + value interface{}, + schema *Schema) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + k := strings.Join(addr, ".") + + if value == nil { + w.result[k+".#"] = "0" + return nil + } + + // If it is a slice, then we have to turn it into a *Set so that + // we get the proper order back based on the hash code. + if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { + // Build a temp *ResourceData to use for the conversion + tempAddr := addr[len(addr)-1:] + tempSchema := *schema + tempSchema.Type = TypeList + tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema} + tempW := &MapFieldWriter{Schema: tempSchemaMap} + + // Set the entire list, this lets us get sane values out of it + if err := tempW.WriteField(tempAddr, value); err != nil { + return err + } + + // Build the set by going over the list items in order and + // hashing them into the set. The reason we go over the list and + // not the `value` directly is because this forces all types + // to become []interface{} (generic) instead of []string, which + // most hash functions are expecting. 
+ s := schema.ZeroValue().(*Set) + tempR := &MapFieldReader{ + Map: BasicMapReader(tempW.Map()), + Schema: tempSchemaMap, + } + for i := 0; i < v.Len(); i++ { + is := strconv.FormatInt(int64(i), 10) + result, err := tempR.ReadField(append(tempAddr, is)) + if err != nil { + return err + } + if !result.Exists { + panic("set item just set doesn't exist") + } + + s.Add(result.Value) + } + + value = s + } + + // Clear any keys that match the set address first. This is necessary because + // it's always possible and sometimes may be necessary to write to a certain + // writer layer more than once with different set data each time, which will + // lead to different keys being inserted, which can lead to determinism + // problems when the old data isn't wiped first. + w.clearTree(addr) + + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + + for code, elem := range value.(*Set).m { + if err := w.set(append(addrCopy, code), elem); err != nil { + return err + } + } + + w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go new file mode 100644 index 000000000..0184d7b08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/getsource_string.go @@ -0,0 +1,46 @@ +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[getSourceState-1] + _ = x[getSourceConfig-2] + _ = x[getSourceDiff-4] + _ = x[getSourceSet-8] + _ = x[getSourceExact-16] + _ = x[getSourceLevelMask-15] +} + +const ( + _getSource_name_0 = "getSourceStategetSourceConfig" + _getSource_name_1 = "getSourceDiff" + _getSource_name_2 = "getSourceSet" + _getSource_name_3 = "getSourceLevelMaskgetSourceExact" +) + +var ( + _getSource_index_0 = [...]uint8{0, 14, 29} + _getSource_index_3 = [...]uint8{0, 18, 32} +) + +func (i getSource) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] + case i == 4: + return _getSource_name_1 + case i == 8: + return _getSource_name_2 + case 15 <= i && i <= 16: + i -= 15 + return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] + default: + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go new file mode 100644 index 000000000..bbea5dbd5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/provider.go @@ -0,0 +1,474 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. 
+// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. + ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // See the ConfigureFunc documentation for more information. + ConfigureFunc ConfigureFunc + + // MetaReset is called by TestReset to reset any state stored in the meta + // interface. This is especially important if the StopContext is stored by + // the provider. + MetaReset func() error + + meta interface{} + + // a mutex is required because TestReset can directly replace the stopCtx + stopMu sync.Mutex + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. 
+func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k, _ := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// Stopped reports whether the provider has been stopped or not. +func (p *Provider) Stopped() bool { + ctx := p.StopContext() + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +// StopCh returns a channel that is closed once the provider is stopped. +func (p *Provider) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + return p.stopCtx +} + +func (p *Provider) stopInit() { + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of terraform.ResourceProvider interface. +func (p *Provider) Stop() error { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtxCancel() + return nil +} + +// TestReset resets any state stored in the Provider, and will call TestReset +// on Meta if it implements the TestProvider interface. +// This may be used to reset the schema.Provider at the start of a test, and is +// automatically called by resource.Test. +func (p *Provider) TestReset() error { + p.stopInit() + if p.MetaReset != nil { + return p.MetaReset() + } + return nil +} + +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Input implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + return schemaMap(p.Schema).Input(input, c) +} + +// Validate implementation of terraform.ResourceProvider interface. +func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.ResourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support resource: %s", t)} + } + + return r.Validate(c) +} + +// Configure implementation of terraform.ResourceProvider interface. +func (p *Provider) Configure(c *terraform.ResourceConfig) error { + // No configuration + if p.ConfigureFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c, nil, p.meta, true) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + + if p.TerraformVersion == "" { + // Terraform 0.12 introduced this field to the protocol + // We can therefore assume that if it's unconfigured at this point, it's 0.10 or 0.11 + p.TerraformVersion = "0.11+compatible" + } + meta, err := p.ConfigureFunc(data) + if err != nil { + return err + } + + p.meta = meta + return nil +} + +// Apply implementation of terraform.ResourceProvider interface. +func (p *Provider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Apply(s, d, p.meta) +} + +// Diff implementation of terraform.ResourceProvider interface. +func (p *Provider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, p.meta) +} + +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) +} + +// Refresh implementation of terraform.ResourceProvider interface. +func (p *Provider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Refresh(s, p.meta) +} + +// Resources implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +func (p *Provider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil { + var err error + results, err = r.Importer.State(data, p.meta) + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.DataSourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support data source: %s", t)} + } + + return r.Validate(c) +} + +// ReadDataDiff implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.Diff(nil, c, p.meta) +} + +// RefreshData implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.ReadDataApply(d, p.meta) +} + +// DataSources implementation of terraform.ResourceProvider interface. 
+func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k, _ := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go new file mode 100644 index 000000000..406dcdf71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource.go @@ -0,0 +1,831 @@ +package schema + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/zclconf/go-cty/cty" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + // + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. 
+ // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not implemented, + // then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // If any errors occur during each of the operation, an error should be + // returned. If a resource was partially updated, be careful to enable + // partial state mode for ResourceData and use it accordingly. + // + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + Exists ExistsFunc + + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. 
+ // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to it's size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// See Resource documentation. +type CreateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ReadFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type UpdateFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type DeleteFunc func(*ResourceData, interface{}) error + +// See Resource documentation. +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. 
+ Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + +// Apply creates, updates, and/or deletes a resource. +func (r *Resource) Apply( + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, err + } + + // Instance Diff shoould have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + if err := r.Delete(data, meta); err != nil { + return r.recordCurrentSchemaVersion(data.State()), err + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, nil + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + if err != nil { + return nil, err + } + } + + err = nil + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + err = r.Create(data, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf("doesn't support update") + } + + err = r.Update(data, meta) + } + + return r.recordCurrentSchemaVersion(data.State()), err +} + +// Diff returns a diff of this resource. +func (r *Resource) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) simpleDiff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. 
+ for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { + warns, errs := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) + } + + return warns, errs +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, error) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), err +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. 
+ data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, err + } + if !exists { + return nil, nil + } + } + + // there may be new StateUpgraders that need to be run + s, err := r.upgradeState(s, meta) + if err != nil { + return s, err + } + + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + var err error + + needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) + migrate := needsMigration && r.MigrateState != nil + + if migrate { + s, err = r.MigrateState(stateSchemaVersion, s, meta) + if err != nil { + return s, err + } + } + + if len(r.StateUpgraders) == 0 { + return s, nil + } + + // If we ran MigrateState, then the stateSchemaVersion value is no longer + // correct. We can expect the first upgrade function to be the correct + // schema type version. + if migrate { + stateSchemaVersion = r.StateUpgraders[0].Version + } + + schemaType := r.CoreConfigSchema().ImpliedType() + // find the expected type to convert the state + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion == upgrader.Version { + schemaType = upgrader.Type + } + } + + // StateUpgraders only operate on the new JSON format state, so the state + // need to be converted. + stateVal, err := StateValueFromInstanceState(s, schemaType) + if err != nil { + return nil, err + } + + jsonState, err := StateValueToJSONMap(stateVal, schemaType) + if err != nil { + return nil, err + } + + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion != upgrader.Version { + continue + } + + jsonState, err = upgrader.Upgrade(jsonState, meta) + if err != nil { + return nil, err + } + stateSchemaVersion++ + } + + // now we need to re-flatmap the new state + stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) + if err != nil { + return nil, err + } + + return r.ShimInstanceStateFromValue(stateVal) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. +// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. 
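
The upgrade path above (checkSchemaVersion, then MigrateState, then the StateUpgraders loop) is driven by the SchemaVersion, MigrateState, and StateUpgraders fields on Resource. The following is a minimal, illustrative sketch of wiring a single upgrader in a provider package; the resource name, attribute names, and upgrade logic are hypothetical and not part of this patch, and the import path is the SDK package vendored in this diff.

package exampleprovider // hypothetical provider package

import (
    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

// resourceExampleThingV0 describes schema version 0; its implied type lets the
// upgrader decode state that was stored in the legacy flatmap format.
func resourceExampleThingV0() *schema.Resource {
    return &schema.Resource{
        Schema: map[string]*schema.Schema{
            "name": {Type: schema.TypeString, Required: true},
        },
    }
}

func resourceExampleThing() *schema.Resource {
    return &schema.Resource{
        SchemaVersion: 1,
        StateUpgraders: []schema.StateUpgrader{
            {
                Version: 0,
                Type:    resourceExampleThingV0().CoreConfigSchema().ImpliedType(),
                Upgrade: upgradeExampleThingV0ToV1,
            },
        },
        Schema: map[string]*schema.Schema{
            "display_name": {Type: schema.TypeString, Required: true},
        },
        // Create, Read and Delete functions omitted from this sketch.
    }
}

// upgradeExampleThingV0ToV1 renames the v0 "name" attribute to "display_name".
func upgradeExampleThingV0ToV1(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
    rawState["display_name"] = rawState["name"]
    delete(rawState, "name")
    return rawState, nil
}
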
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.Create != nil || r.Update != nil || r.Delete != nil { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if r.Update == nil { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Destroy, and Read are required + if r.Read == nil { + return fmt.Errorf("Read must be implemented") + } + if r.Delete == nil { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. + if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + return schemaMap(r.Schema).InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + +// Data returns a ResourceData struct for this Resource. 
Each return value +// is a separate copy and can be safely modified differently. +// +// The data returned from this function has no actual affect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through in the schema +// of the receiving Resource. +func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + +// Returns true if the resource is "top level" i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.Create != nil || r.Read != nil) +} + +// Determines if a given InstanceState needs to be migrated by checking the +// stored version number with the current SchemaVersion +func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { + // Get the raw interface{} value for the schema version. If it doesn't + // exist or is nil then set it to zero. + raw := is.Meta["schema_version"] + if raw == nil { + raw = "0" + } + + // Try to convert it to a string. If it isn't a string then we pretend + // that it isn't set at all. It should never not be a string unless it + // was manually tampered with. + rawString, ok := raw.(string) + if !ok { + rawString = "0" + } + + stateSchemaVersion, _ := strconv.Atoi(rawString) + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. 
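
The convenience implementation RemoveFromState, the CRUD function types, Timeouts, and DeprecationMessage all come together on the Resource struct. The sketch below is illustrative only: the resource name, attributes, and the Create function are hypothetical, and it assumes the imports from the earlier sketch plus "time". The Timeouts declared here are the values that ResourceData.Timeout later hands back to the CRUD functions.

func resourceExampleNote() *schema.Resource {
    return &schema.Resource{
        Create: resourceExampleNoteCreate, // hypothetical, not shown
        Read:   resourceExampleNoteRead,   // sketched further below
        Update: resourceExampleNoteUpdate, // sketched further below
        Delete: schema.RemoveFromState,    // nothing to delete remotely; just drop it from state

        DeprecationMessage: "example_note is deprecated; use example_document instead",

        Timeouts: &schema.ResourceTimeout{
            Create: schema.DefaultTimeout(10 * time.Minute),
            Update: schema.DefaultTimeout(5 * time.Minute),
        },

        Schema: map[string]*schema.Schema{
            "content": {Type: schema.TypeString, Required: true},
        },
    }
}
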
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+ d.SetId("")
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go
new file mode 100644
index 000000000..259487ed5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data.go
@@ -0,0 +1,561 @@
+package schema
+
+import (
+ "log"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
+type ResourceData struct {
+ // Settable (internally)
+ schema map[string]*Schema
+ config *terraform.ResourceConfig
+ state *terraform.InstanceState
+ diff *terraform.InstanceDiff
+ meta map[string]interface{}
+ timeouts *ResourceTimeout
+
+ // Don't set
+ multiReader *MultiLevelFieldReader
+ setWriter *MapFieldWriter
+ newState *terraform.InstanceState
+ partial bool
+ partialMap map[string]struct{}
+ once sync.Once
+ isNew bool
+
+ panicOnError bool
+}
+
+// getResult is the internal structure that is generated when a Get
+// is called that contains some extra data that might be used.
+type getResult struct {
+ Value interface{}
+ ValueProcessed interface{}
+ Computed bool
+ Exists bool
+ Schema *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
+// values, bypassing schema. This MUST NOT be used in normal circumstances -
+// it exists only to support the remote_state data source.
+//
+// Deprecated: Fully define schema attributes and use Set() instead.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+ d.once.Do(d.init)
+
+ d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+ v, _ := d.GetOk(key)
+ return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+ o, n := d.getChange(key, getSourceState, getSourceDiff)
+ return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists returns the data for a given key and whether or not the key +// has been set to a non-zero value. This is only useful for determining +// if boolean attributes have been set, if they are Optional but do not +// have a Default value. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +// This should only be used if absolutely required/needed. +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceData) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := o.(Equal); ok { + return !eq.Equal(n) + } + + return !reflect.DeepEqual(o, n) +} + +// Partial turns partial state mode on/off. +// +// When partial state mode is enabled, then only key prefixes specified +// by SetPartial will be in the final state. This allows providers to return +// partial states for partially applied resources (when errors occur). +func (d *ResourceData) Partial(on bool) { + d.partial = on + if on { + if d.partialMap == nil { + d.partialMap = make(map[string]struct{}) + } + } else { + d.partialMap = nil + } +} + +// Set sets the value for the given key. +// +// If the key is invalid or the value is not a correct type, an error +// will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. This allows Set to take a pointer to primitives to + // simplify the interface. + reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as its not + // a pointer to a struct, since struct pointers are allowed. + reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err +} + +// SetPartial adds the key to the final state output while +// in partial state mode. The key must be a root key in the schema (i.e. 
+// it cannot be "list.0"). +// +// If partial state mode is disabled, then this has no effect. Additionally, +// whenever partial state mode is toggled, the partial data is cleared. +func (d *ResourceData) SetPartial(k string) { + if d.partial { + d.partialMap[k] = struct{}{} + } +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. +func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the +// resource is destroyed. +func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource. +func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *terraform.InstanceState { + var result terraform.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k, _ := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. 
+ rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + if _, ok := d.partialMap[k]; ok { + source = getSourceSet + } + } + + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. +func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) 
+ readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config. + o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + level := "set" + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go new file mode 100644 index 000000000..8bfb079be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_data_get_source.go @@ -0,0 +1,17 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). 
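
As a usage sketch of the ResourceData accessors documented above (Id, Get, HasChange, Set, SetId), the Read and Update functions referenced in the earlier resource sketch might look like this; the exampleAPI interface and its methods are hypothetical stand-ins for whatever client the provider stores in meta.

// exampleAPI is a hypothetical stand-in for the provider's API client.
type exampleAPI interface {
    GetNote(id string) (content string, exists bool, err error)
    UpdateNote(id, content string) error
}

func resourceExampleNoteRead(d *schema.ResourceData, meta interface{}) error {
    api := meta.(exampleAPI)

    content, exists, err := api.GetNote(d.Id())
    if err != nil {
        return err
    }
    if !exists {
        // The remote object is gone; clearing the ID removes it from state.
        d.SetId("")
        return nil
    }
    return d.Set("content", content)
}

func resourceExampleNoteUpdate(d *schema.ResourceData, meta interface{}) error {
    api := meta.(exampleAPI)

    // HasChange compares the prior state against the desired configuration.
    if d.HasChange("content") {
        if err := api.UpdateNote(d.Id(), d.Get("content").(string)); err != nil {
            return err
        }
    }
    return resourceExampleNoteRead(d, meta)
}
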
+type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go new file mode 100644 index 000000000..f55a66e14 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_diff.go @@ -0,0 +1,559 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // A lock to prevent races on writes. The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapValueWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to MapValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. +func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. + v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. 
+// It can be used to veto particular changes in the diff, customize the diff +// that has been created, or diff values not controlled by config. +// +// The object functions similar to ResourceData, however most notably lacks +// Set, SetPartial, and Partial, as it should be used to change diff values +// only. Most other first-class ResourceData functions exist, namely Get, +// GetOk, HasChange, and GetChange exist. +// +// All functions in ResourceDiff, save for ForceNew, can only be used on +// computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *terraform.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *terraform.InstanceState + + // The diff created by Terraform. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *terraform.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. + multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that were updated by this ResourceDiff run. +// These are the only keys that a diff should be re-calculated for. +// +// This is the combined result of both keys for which diff values were updated +// for or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. 
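
A CustomizeDiffFunc receives a *ResourceDiff built by newResourceDiff above. As a sketch of the "veto" use mentioned in the type comment, a resource can reject a planned change its backend cannot perform; the size_gb attribute is hypothetical and "fmt" is assumed to be imported.

// Wired up on the resource as: CustomizeDiff: resourceExampleVolumeCustomizeDiff
func resourceExampleVolumeCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
    if d.HasChange("size_gb") {
        o, n := d.GetChange("size_gb")
        if n.(int) < o.(int) {
            // Returning an error vetoes the change at plan time.
            return fmt.Errorf("size_gb cannot be reduced from %d to %d", o.(int), n.(int))
        }
    }
    return nil
}
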
+func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + for _, l := range s { + if k == l { + break + } + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. +func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. 
+// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function is a no-op/error if there is no diff. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. +func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available +// as its final value at diff time. If the return value is false, this means +// either the value is based of interpolation that was unavailable at diff +// time, or that the value was explicitly marked as computed by SetNewComputed. 
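
The write-side helpers adjust the plan rather than veto it. In the sketch below (attribute names hypothetical), "etag" is a Computed attribute derived from "content", so it is marked unknown whenever "content" changes, and a change to "region" is promoted to a replacement because the assumed backend cannot move a resource in place.

func resourceExampleNoteCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
    // The new "etag" cannot be known until after apply once "content" changes.
    if d.HasChange("content") {
        if err := d.SetNewComputed("etag"); err != nil {
            return err
        }
    }

    // Force replacement for a change the assumed API cannot apply in place.
    if d.HasChange("region") {
        if err := d.ForceNew("region"); err != nil {
            return err
        }
    }
    return nil
}
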
+func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or +// in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. +// +// Note that technically, ID does not change during diffs (it either has +// already changed in the refresh, or will change on update), hence we do not +// support updating the ID or fetching it from anything else other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in +// diffChange, HasChange, and GetChange. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized +// diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, +// starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. +func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact. +func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. 
+func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. +func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go new file mode 100644 index 000000000..5dada3caf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_importer.go @@ -0,0 +1,52 @@ +package schema + +// ResourceImporter defines how a resource is imported in Terraform. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in Terraform is the process of taking an already-created +// resource and bringing it under Terraform management. This can include +// updating Terraform state, generating Terraform configuration, etc. +type ResourceImporter struct { + // The functions below must all be implemented for importing to work. + + // State is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. + State StateFunc +} + +// StateFunc is the function called to import a resource into the +// Terraform state. It is given a ResourceData with only ID set. This +// ID is going to be an arbitrary value given by the user and may not map +// directly to the ID format that the resource expects, so that should +// be validated. +// +// This should return a slice of ResourceData that turn into the state +// that was imported. This might be as simple as returning only the argument +// that was given to the function. In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. +// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. +// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. 
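
When an ID-only refresh is possible, the importer can be as small as Importer: &schema.ResourceImporter{State: schema.ImportStatePassthrough}. When the user-supplied import ID is composite, a custom StateFunc can split it first; the sketch below assumes a hypothetical "group/name" ID format and the "strings" and "fmt" imports.

// Wired up on the resource as: Importer: &schema.ResourceImporter{State: importExampleNote}
func importExampleNote(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
    parts := strings.SplitN(d.Id(), "/", 2)
    if len(parts) != 2 {
        return nil, fmt.Errorf("unexpected import ID %q, expected \"group/name\"", d.Id())
    }
    if err := d.Set("group", parts[0]); err != nil {
        return nil, err
    }
    d.SetId(parts[1])
    return []*schema.ResourceData{d}, nil
}
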
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go new file mode 100644 index 000000000..f12bf7259 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/resource_timeout.go @@ -0,0 +1,263 @@ +package schema + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/copystructure" +) + +const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" +const TimeoutsConfigKey = "timeouts" + +const ( + TimeoutCreate = "create" + TimeoutRead = "read" + TimeoutUpdate = "update" + TimeoutDelete = "delete" + TimeoutDefault = "default" +) + +func timeoutKeys() []string { + return []string{ + TimeoutCreate, + TimeoutRead, + TimeoutUpdate, + TimeoutDelete, + TimeoutDefault, + } +} + +// could be time.Duration, int64 or float64 +func DefaultTimeout(tx interface{}) *time.Duration { + var td time.Duration + switch raw := tx.(type) { + case time.Duration: + return &raw + case int64: + td = time.Duration(raw) + case float64: + td = time.Duration(int64(raw)) + default: + log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx) + } + return &td +} + +type ResourceTimeout struct { + Create, Read, Update, Delete, Default *time.Duration +} + +// ConfigDecode takes a schema and the configuration (available in Diff) and +// validates, parses the timeouts into `t` +func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error { + if s.Timeouts != nil { + raw, err := copystructure.Copy(s.Timeouts) + if err != nil { + log.Printf("[DEBUG] Error with deep copy: %s", err) + } + *t = *raw.(*ResourceTimeout) + } + + if raw, ok := c.Config[TimeoutsConfigKey]; ok { + var rawTimeouts []map[string]interface{} + switch raw := raw.(type) { + case map[string]interface{}: + rawTimeouts = append(rawTimeouts, raw) + case []map[string]interface{}: + rawTimeouts = raw + case string: + if raw == hcl2shim.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil + } else { + log.Printf("[ERROR] Invalid timeout value: %q", raw) + return fmt.Errorf("Invalid Timeout value found") + } + case []interface{}: + for _, r := range raw { + if rMap, ok := r.(map[string]interface{}); ok { + rawTimeouts = append(rawTimeouts, rMap) + } else { + // Go will not allow a fallthrough + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + } + default: + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + + for _, timeoutValues := range rawTimeouts { + for timeKey, timeValue := range timeoutValues { + // validate that we're dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break + } + } + + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } + + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err) + } + + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + 
timeout = t.Update
+ case TimeoutRead:
+ timeout = t.Read
+ case TimeoutDelete:
+ timeout = t.Delete
+ case TimeoutDefault:
+ timeout = t.Default
+ }
+
+ // If the resource has not declared this in the definition, then error
+ // with an unsupported message
+ if timeout == nil {
+ return unsupportedTimeoutKeyError(timeKey)
+ }
+
+ *timeout = rt
+ }
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+ return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and MetaDecode are analogous to the Go stdlib JSONEncoder
+// interface: they encode/decode a timeouts struct from an instance diff, which is
+// where the timeout data is stored after a diff to pass into Apply.
+//
+// StateEncode encodes the timeout into the ResourceData's InstanceState for
+// saving to state
+//
+func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
+ return t.metaEncode(id)
+}
+
+func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
+ return t.metaEncode(is)
+}
+
+// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
+// and stores it in the Meta field of the interface it's given.
+// Assumes the interface is either *terraform.InstanceState or
+// *terraform.InstanceDiff, returns an error otherwise
+func (t *ResourceTimeout) metaEncode(ids interface{}) error {
+ m := make(map[string]interface{})
+
+ if t.Create != nil {
+ m[TimeoutCreate] = t.Create.Nanoseconds()
+ }
+ if t.Read != nil {
+ m[TimeoutRead] = t.Read.Nanoseconds()
+ }
+ if t.Update != nil {
+ m[TimeoutUpdate] = t.Update.Nanoseconds()
+ }
+ if t.Delete != nil {
+ m[TimeoutDelete] = t.Delete.Nanoseconds()
+ }
+ if t.Default != nil {
+ m[TimeoutDefault] = t.Default.Nanoseconds()
+ // for any key above that is nil, if default is specified, we need to
+ // populate it with the default
+ for _, k := range timeoutKeys() {
+ if _, ok := m[k]; !ok {
+ m[k] = t.Default.Nanoseconds()
+ }
+ }
+ }
+
+ // only add the Timeout to the Meta if we have values
+ if len(m) > 0 {
+ switch instance := ids.(type) {
+ case *terraform.InstanceDiff:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ case *terraform.InstanceState:
+ if instance.Meta == nil {
+ instance.Meta = make(map[string]interface{})
+ }
+ instance.Meta[TimeoutKey] = m
+ default:
+ return fmt.Errorf("Error matching type for Diff Encode")
+ }
+ }
+
+ return nil
+}
+
+func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
+ return t.metaDecode(id)
+}
+func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
+ return t.metaDecode(is)
+}
+
+func (t *ResourceTimeout) metaDecode(ids interface{}) error {
+ var rawMeta interface{}
+ var ok bool
+ switch rawInstance := ids.(type) {
+ case *terraform.InstanceDiff:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ case *terraform.InstanceState:
+ rawMeta, ok = rawInstance.Meta[TimeoutKey]
+ if !ok {
+ return nil
+ }
+ default:
+ return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
+ }
+
+ times := rawMeta.(map[string]interface{})
+ if len(times) == 0 {
+ return nil
+ }
+
+ if v, ok := times[TimeoutCreate]; ok {
+ t.Create = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutRead]; ok {
+ t.Read = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutUpdate]; ok {
+ t.Update = DefaultTimeout(v)
+ }
+ if v, ok := times[TimeoutDelete]; ok {
+ t.Delete = DefaultTimeout(v)
+ }
+ if v, ok :=
times[TimeoutDefault]; ok { + t.Default = DefaultTimeout(v) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go new file mode 100644 index 000000000..0cd64635e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/schema.go @@ -0,0 +1,1976 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. +package schema + +import ( + "context" + "fmt" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" +) + +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + +// type used for schema package context keys +type contextKey string + +var ( + protoVersionMu sync.Mutex + protoVersion5 = false +) + +func isProto5() bool { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + return protoVersion5 + +} + +// SetProto5 enables a feature flag for any internal changes required required +// to work with the new plugin protocol. This should not be called by +// provider. +func SetProto5() { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + protoVersion5 = true +} + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. + // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. 
+ // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs or asking for user + // input. It should be relatively short (a few sentences max) and should + // be formatted to fit a CLI. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). + // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. 
Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // PromoteSingle originally allowed for a single element to be assigned + // where a primitive list was expected, but this no longer works from + // Terraform v0.12 onwards (Terraform Core will require a list to be set + // regardless of what this is set to) and so only applies to Terraform v0.11 + // and earlier, and so should be used only to retain this functionality + // for those still using v0.11 with a provider that formerly used this. + PromoteSingle bool + + // The following fields are only valid for a TypeSet type. + // + // Set defines a function to determine the unique ID of an item so that + // a proper set can be built. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // NOTE: This currently does not work. + ComputedWhen []string + + // ConflictsWith is a set of schema keys that conflict with this schema. + // This will only check that they're set in the _config_. This will not + // raise an error for a malfunctioning resource that sets a conflicting + // key. + // + // ExactlyOneOf is a set of schema keys that, when set, only one of the + // keys in that list can be specified. It will error if none are + // specified as well. + // + // AtLeastOneOf is a set of schema keys that, when set, at least one of + // the keys in that list must be specified. + ConflictsWith []string + ExactlyOneOf []string + AtLeastOneOf []string + + // When Deprecated is set, this attribute is deprecated. + // + // A deprecated field still works, but will probably stop working in near + // future. This string is the message shown to the user with instructions on + // how to address the deprecation. + Deprecated string + + // When Removed is set, this attribute has been removed from the schema + // + // Removed attributes can be left in the Schema to generate informative error + // messages for the user when they show up in resource configurations. + // This string is the message shown to the user with instructions on + // what do to about the removed attribute. + Removed string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. + // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. 
It is ignored for all other types. + ValidateFunc SchemaValidateFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // logs or regular output. It should be used for passwords or other + // secret fields. Future versions of Terraform may encrypt these + // values. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. +type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. +type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. +type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. 
+ if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. + // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. 
+func (m schemaMap) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Input implements the terraform.ResourceProvider method by asking +// for input for required configuration keys that don't have a value. 
+func (m schemaMap) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := m[k] + + // Skip things that don't require config, if that is even valid + // for a provider schema. + // Required XOR Optional must always be true to validate, so we only + // need to check one. + if v.Optional { + continue + } + + // Deprecated fields should never prompt + if v.Deprecated != "" { + continue + } + + // Skip things that have a value of some sort already + if _, ok := c.Raw[k]; ok { + continue + } + + // Skip if it has a default value + defaultValue, err := v.DefaultValue() + if err != nil { + return nil, fmt.Errorf("%s: error loading default: %s", k, err) + } + if defaultValue != nil { + continue + } + + var value interface{} + switch v.Type { + case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: + continue + case TypeString: + value, err = m.inputString(input, k, v) + default: + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) + } + + if err != nil { + return nil, fmt.Errorf( + "%s: %s", k, err) + } + + c.Config[k] = value + } + + return c, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { + return m.validateObject("", m, c) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. +func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
+ if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ExactlyOneOf) > 0 && v.Required { + return fmt.Errorf("%s: ExactlyOneOf cannot be set with Required", k) + } + + if len(v.AtLeastOneOf) > 0 && v.Required { + return fmt.Errorf("%s: AtLeastOneOf cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap) + if err != nil { + return fmt.Errorf("ConflictsWith: %+v", err) + } + } + + if len(v.ExactlyOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap) + if err != nil { + return fmt.Errorf("ExactlyOneOf: %+v", err) + } + } + + if len(v.AtLeastOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap) + if err != nil { + return fmt.Errorf("AtLeastOneOf: %+v", err) + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + // Computed-only field + if v.Computed && !v.Optional { + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. 
"+ + "There is no config for computed-only field, nothing to compare.", k) + } + } + + if v.ValidateFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) + } + } + + if v.Deprecated == "" && v.Removed == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap) error { + for _, key := range keys { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for _, part := range parts { + // Skip index fields + if _, err := strconv.Atoi(part); err == nil { + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) + } + + if subResource, ok := target.Elem.(*Resource); ok { + sm = schemaMap(subResource.Schema) + } + } + if target == nil { + return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) + } + if target.Required { + return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) + } + } + return nil +} + +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. 
+ if !all { + continue + } + + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." 
+ + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. + if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. 
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. + codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already setup. 
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + + return nil +} + +func (m schemaMap) inputString( + input terraform.UIInput, + k string, + schema *Schema) (interface{}, error) { + result, err := input.Input(context.Background(), &terraform.InputOpts{ + Id: k, + Query: k, + Description: schema.Description, + Default: schema.InputDefault, + }) + + return result, err +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return nil, []error{fmt.Errorf( + "%q, error loading default: %s", k, err)} + } + + // We're okay as long as we had a value set + ok = raw != nil + } + + err := validateExactlyOneAttribute(k, schema, c) + if err != nil { + return nil, []error{err} + } + + err = validateAtLeastOneAttribute(k, schema, c) + if err != nil { + return nil, []error{err} + } + + if !ok { + if schema.Required { + return nil, []error{fmt.Errorf( + "%q: required field is not set", k)} + } + return nil, nil + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return nil, []error{fmt.Errorf( + "%q: this field cannot be set", k)} + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. + if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil + } + return nil, nil + } + + err = validateConflictingAttributes(k, schema, c) + if err != nil { + return nil, []error{err} + } + + return m.validateType(k, raw, schema, c) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. 
+ continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func removeDuplicates(elements []string) []string { + encountered := make(map[string]struct{}, 0) + result := []string{} + + for v := range elements { + if _, ok := encountered[elements[v]]; !ok { + encountered[elements[v]] = struct{}{} + result = append(result, elements[v]) + } + } + + return result +} + +func validateExactlyOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ExactlyOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.ExactlyOneOf, k)) + sort.Strings(allKeys) + specified := make([]string, 0) + unknownVariableValueCount := 0 + for _, exactlyOneOfKey := range allKeys { + if c.IsComputed(exactlyOneOfKey) { + unknownVariableValueCount++ + continue + } + + _, ok := c.Get(exactlyOneOfKey) + if ok { + specified = append(specified, exactlyOneOfKey) + } + } + + if len(specified) == 0 && unknownVariableValueCount == 0 { + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + + if len(specified) > 1 { + return fmt.Errorf("%q: only one of `%s` can be specified, but `%s` were specified.", k, strings.Join(allKeys, ","), strings.Join(specified, ",")) + } + + return nil +} + +func validateAtLeastOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.AtLeastOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.AtLeastOneOf, k)) + sort.Strings(allKeys) + + for _, atLeastOneOfKey := range allKeys { + if _, ok := c.Get(atLeastOneOfKey); ok { + // We can ignore hcl2shim.UnknownVariable by assuming it's been set and additional validation elsewhere + // will uncover this if it is in fact null. + return nil + } + } + + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + // If we support promotion and the raw value isn't a slice, wrap + // it in []interface{} and check again. + if schema.PromoteSingle && rawV.Kind() != reflect.Slice { + raw = []interface{}{raw} + rawV = reflect.ValueOf(raw) + } + + if rawV.Kind() != reflect.Slice { + return nil, []error{fmt.Errorf( + "%s: should be a list", k)} + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. 
+ if isProto5() && !isWhollyKnown(raw) { + return nil, nil + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())} + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return nil, []error{fmt.Errorf( + "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())} + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + var ws []string + var es []error + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + var ws2 []string + var es2 []error + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + ws2, es2 = m.validateObject(key, t.Schema, c) + case *Schema: + ws2, es2 = m.validateType(key, raw, t, c) + } + + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + return ws, es +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return nil, nil + } + } + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. + reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + // Otherwise it's likely raw is an interpolation. 
+ return nil, nil + case reflect.Map: + case reflect.Slice: + default: + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + if schema.ValidateFunc != nil { + return schema.ValidateFunc(mapIface, k) + } + return nil, nil + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return nil, []error{fmt.Errorf( + "%s: should be a map", k)} + } + mapIface := v.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + } + + if schema.ValidateFunc != nil { + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + + return nil, nil +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { + for key, raw := range m { + valueType, err := getValueType(k, schema) + if err != nil { + return nil, []error{err} + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return nil, nil +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. + if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return nil, nil + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return nil, []error{fmt.Errorf( + "%s: expected object, got %s", + k, reflect.ValueOf(raw).Kind())} + } + + var ws []string + var es []error + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + + ws2, es2 := m.validate(key, s, c) + if len(ws2) > 0 { + ws = append(ws, ws2...) 
+ } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk, _ := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + es = append(es, fmt.Errorf( + "%s: invalid or unknown key: %s", k, subk)) + } + } + } + + return ws, es +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return nil, nil + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a list", k), + } + case reflect.Map: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a map", k), + } + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return nil, nil + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeInt: + switch { + case isProto5(): + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. + + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} + } + default: + // Verify that we can parse this as an int + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + } + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + if schema.ValidateFunc != nil { + return schema.ValidateFunc(decoded, k) + } + + return nil, nil +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + var ws []string + var es []error + switch schema.Type { + case TypeSet, TypeList: + ws, es = m.validateList(k, raw, schema, c) + case TypeMap: + ws, es = m.validateMap(k, raw, schema, c) + default: + ws, es = m.validatePrimitive(k, raw, schema, c) + } + + if schema.Deprecated != "" { + ws = append(ws, fmt.Sprintf( + "%q: [DEPRECATED] %s", k, schema.Deprecated)) + } + + if schema.Removed != "" { + es = append(es, fmt.Errorf( + "%q: [REMOVED] %s", k, schema.Removed)) + } + + return ws, es +} + +// Zero returns the zero value for a type. 
+func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go new file mode 100644 index 000000000..fe6d7504c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/serialize.go @@ -0,0 +1,125 @@ +package schema + +import ( + "bytes" + "fmt" + "sort" + "strconv" +) + +func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { + if val == nil { + buf.WriteRune(';') + return + } + + switch schema.Type { + case TypeBool: + if val.(bool) { + buf.WriteRune('1') + } else { + buf.WriteRune('0') + } + case TypeInt: + buf.WriteString(strconv.Itoa(val.(int))) + case TypeFloat: + buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) + case TypeString: + buf.WriteString(val.(string)) + case TypeList: + buf.WriteRune('(') + l := val.([]interface{}) + for _, innerVal := range l { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune(')') + case TypeMap: + + m := val.(map[string]interface{}) + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + buf.WriteRune('[') + for _, k := range keys { + innerVal := m[k] + if innerVal == nil { + continue + } + buf.WriteString(k) + buf.WriteRune(':') + + switch innerVal := innerVal.(type) { + case int: + buf.WriteString(strconv.Itoa(innerVal)) + case float64: + buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) + case string: + buf.WriteString(innerVal) + default: + panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) + } + + buf.WriteRune(';') + } + buf.WriteRune(']') + case TypeSet: + buf.WriteRune('{') + s := val.(*Set) + for _, innerVal := range s.List() { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune('}') + default: + panic("unknown schema type to serialize") + } + buf.WriteRune(';') +} + +// SerializeValueForHash appends a serialization of the given resource config +// to the given buffer, guaranteeing deterministic results given the same value +// and schema. +// +// Its primary purpose is as input into a hashing function in order +// to hash complex substructures when used in sets, and so the serialization +// is not reversible. +func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } + sm := resource.Schema + m := val.(map[string]interface{}) + var keys []string + for k := range sm { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + innerSchema := sm[k] + // Skip attributes that are not user-provided. Computed attributes + // do not contribute to the hash since their ultimate value cannot + // be known at plan/diff time. 
+ if !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go new file mode 100644 index 000000000..daa431ddb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/set.go @@ -0,0 +1,246 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. 
+func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. +func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + return reflect.DeepEqual(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. +func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + + if isProto5() { + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. 
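A minimal sketch of the exported Set API in use; the element values below are illustrative only:

package main

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func main() {
    a := schema.NewSet(schema.HashString, []interface{}{"alpha", "beta"})
    b := schema.NewSet(schema.HashString, []interface{}{"beta", "gamma"})

    fmt.Println(a.Contains("alpha"))    // true
    fmt.Println(a.Union(b).Len())       // 3
    fmt.Println(a.Difference(b).List()) // [alpha]
}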
+ if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go new file mode 100644 index 000000000..93c601f80 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/shims.go @@ -0,0 +1,115 @@ +package schema + +import ( + "encoding/json" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// DiffFromValues takes the current state and desired state as cty.Values and +// derives a terraform.InstanceDiff to give to the legacy providers. This is +// used to take the states provided by the new ApplyResourceChange method and +// convert them to a state+diff required for the legacy Apply method. +func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(prior, planned, res, nil) +} + +// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our +// test fixtures from the legacy tests. In the new provider protocol the diff +// only needs to be created for the apply operation, and any customizations +// have already been done. +func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } + + configSchema := res.CoreConfigSchema() + + cfg := terraform.NewResourceConfigShimmed(planned, configSchema) + removeConfigUnknowns(cfg.Config) + removeConfigUnknowns(cfg.Raw) + + diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) + if err != nil { + return nil, err + } + + return diff, err +} + +// During apply the only unknown values are those which are to be computed by +// the resource itself. These may have been marked as unknown config values, and +// need to be removed to prevent the UnknownVariableValue from appearing the diff. +func removeConfigUnknowns(cfg map[string]interface{}) { + for k, v := range cfg { + switch v := v.(type) { + case string: + if v == hcl2shim.UnknownVariableValue { + delete(cfg, k) + } + case []interface{}: + for _, i := range v { + if m, ok := i.(map[string]interface{}); ok { + removeConfigUnknowns(m) + } + } + case map[string]interface{}: + removeConfigUnknowns(v) + } + } +} + +// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to +// get a new cty.Value state. This is used to convert the diff returned from +// the legacy provider Diff method to the state required for the new +// PlanResourceChange method. +func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. 
+func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a terraform.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go new file mode 100644 index 000000000..4d0fd7365 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/testing.go @@ -0,0 +1,28 @@ +package schema + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw( + t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go new file mode 100644 index 000000000..0f65d692f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype.go @@ -0,0 +1,21 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// NOTE: ValueType has more functions defined on it in schema.go. We can't +// put them here because we reference other files. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go new file mode 100644 index 000000000..914ca32cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/schema/valuetype_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
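TestResourceDataRaw above is the usual entry point for unit tests that need a *ResourceData without running a real plan/apply. A hedged sketch of such a test, with an illustrative schema and attribute name:

package example

import (
    "testing"

    "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)

func TestNameAttribute(t *testing.T) {
    s := map[string]*schema.Schema{
        "name": {Type: schema.TypeString, Required: true},
    }

    d := schema.TestResourceDataRaw(t, s, map[string]interface{}{
        "name": "example",
    })

    if got, ok := d.Get("name").(string); !ok || got != "example" {
        t.Fatalf("unexpected value for name: %#v", d.Get("name"))
    }
}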
+ var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypeBool-1] + _ = x[TypeInt-2] + _ = x[TypeFloat-3] + _ = x[TypeString-4] + _ = x[TypeList-5] + _ = x[TypeMap-6] + _ = x[TypeSet-7] + _ = x[typeObject-8] +} + +const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" + +var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} + +func (i ValueType) String() string { + if i < 0 || i >= ValueType(len(_ValueType_index)-1) { + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go new file mode 100644 index 000000000..36b494c01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/httpclient/useragent.go @@ -0,0 +1,26 @@ +package httpclient + +import ( + "fmt" + "log" + "os" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/meta" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func TerraformUserAgent(version string) string { + ua := fmt.Sprintf("HashiCorp Terraform/%s (+https://www.terraform.io) Terraform Plugin SDK/%s", version, meta.SDKVersionString()) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go new file mode 100644 index 000000000..90a5faf0e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/count_attr.go @@ -0,0 +1,12 @@ +package addrs + +// CountAttr is the address of an attribute of the "count" object in +// the interpolation scope, like "count.index". +type CountAttr struct { + referenceable + Name string +} + +func (ca CountAttr) String() string { + return "count." + ca.Name +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go new file mode 100644 index 000000000..46093314f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/doc.go @@ -0,0 +1,17 @@ +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. 
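Referring back to the httpclient helper above: a small sketch of TerraformUserAgent and the TF_APPEND_USER_AGENT override; the version string and appended token are illustrative:

package main

import (
    "fmt"
    "os"

    "github.com/hashicorp/terraform-plugin-sdk/httpclient"
)

func main() {
    // Operators can append an extra token via TF_APPEND_USER_AGENT.
    os.Setenv("TF_APPEND_USER_AGENT", "my-ci/1.0")

    // Prints something like:
    // HashiCorp Terraform/0.12.24 (+https://www.terraform.io) Terraform Plugin SDK/<sdk version> my-ci/1.0
    fmt.Println(httpclient.TerraformUserAgent("0.12.24"))
}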
+package addrs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go new file mode 100644 index 000000000..7a6385035 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/for_each_attr.go @@ -0,0 +1,12 @@ +package addrs + +// ForEachAttr is the address of an attribute referencing the current "for_each" object in +// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value" +type ForEachAttr struct { + referenceable + Name string +} + +func (f ForEachAttr) String() string { + return "each." + f.Name +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go new file mode 100644 index 000000000..d2c046c11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/input_variable.go @@ -0,0 +1,41 @@ +package addrs + +import ( + "fmt" +) + +// InputVariable is the address of an input variable. +type InputVariable struct { + referenceable + Name string +} + +func (v InputVariable) String() string { + return "var." + v.Name +} + +// AbsInputVariableInstance is the address of an input variable within a +// particular module instance. +type AbsInputVariableInstance struct { + Module ModuleInstance + Variable InputVariable +} + +// InputVariable returns the absolute address of the input variable of the +// given name inside the receiving module instance. +func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance { + return AbsInputVariableInstance{ + Module: m, + Variable: InputVariable{ + Name: name, + }, + } +} + +func (v AbsInputVariableInstance) String() string { + if len(v.Module) == 0 { + return v.String() + } + + return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go new file mode 100644 index 000000000..cef8b2796 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/instance_key.go @@ -0,0 +1,123 @@ +package addrs + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// InstanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// IntKey and StringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type InstanceKey interface { + instanceKeySigil() + String() string +} + +// ParseInstanceKey returns the instance key corresponding to the given value, +// which must be known and non-null. +// +// If an unknown or null value is provided then this function will panic. This +// function is intended to deal with the values that would naturally be found +// in a hcl.TraverseIndex, which (when parsed from source, at least) can never +// contain unknown or null values. 
+func ParseInstanceKey(key cty.Value) (InstanceKey, error) { + switch key.Type() { + case cty.String: + return StringKey(key.AsString()), nil + case cty.Number: + var idx int + err := gocty.FromCtyValue(key, &idx) + return IntKey(idx), err + default: + return NoKey, fmt.Errorf("either a string or an integer is required") + } +} + +// NoKey represents the absense of an InstanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. +var NoKey InstanceKey + +// IntKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type IntKey int + +func (k IntKey) instanceKeySigil() { +} + +func (k IntKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// StringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type StringKey string + +func (k StringKey) instanceKeySigil() { +} + +func (k StringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. + return fmt.Sprintf("[%q]", string(k)) +} + +// InstanceKeyLess returns true if the first given instance key i should sort +// before the second key j, and false otherwise. +func InstanceKeyLess(i, j InstanceKey) bool { + iTy := instanceKeyType(i) + jTy := instanceKeyType(j) + + switch { + case i == j: + return false + case i == NoKey: + return true + case j == NoKey: + return false + case iTy != jTy: + // The ordering here is arbitrary except that we want NoKeyType + // to sort before the others, so we'll just use the enum values + // of InstanceKeyType here (where NoKey is zero, sorting before + // any other). + return uint32(iTy) < uint32(jTy) + case iTy == IntKeyType: + return int(i.(IntKey)) < int(j.(IntKey)) + case iTy == StringKeyType: + return string(i.(StringKey)) < string(j.(StringKey)) + default: + // Shouldn't be possible to get down here in practice, since the + // above is exhaustive. + return false + } +} + +func instanceKeyType(k InstanceKey) InstanceKeyType { + if _, ok := k.(StringKey); ok { + return StringKeyType + } + if _, ok := k.(IntKey); ok { + return IntKeyType + } + return NoKeyType +} + +// InstanceKeyType represents the different types of instance key that are +// supported. Usually it is sufficient to simply type-assert an InstanceKey +// value to either IntKey or StringKey, but this type and its values can be +// used to represent the types themselves, rather than specific values +// of those types. +type InstanceKeyType rune + +const ( + NoKeyType InstanceKeyType = 0 + IntKeyType InstanceKeyType = 'I' + StringKeyType InstanceKeyType = 'S' +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go new file mode 100644 index 000000000..61a07b9c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/local_value.go @@ -0,0 +1,48 @@ +package addrs + +import ( + "fmt" +) + +// LocalValue is the address of a local value. +type LocalValue struct { + referenceable + Name string +} + +func (v LocalValue) String() string { + return "local." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. 
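Since the addrs package is internal, the sketch below would only compile as a test file inside the SDK module itself; it illustrates the instance-key helpers defined above, with illustrative keys and a hypothetical example function name:

package addrs_test // hypothetical test file alongside the internal addrs package

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
    "github.com/zclconf/go-cty/cty"
)

func Example_instanceKeys() {
    k, err := addrs.ParseInstanceKey(cty.StringVal("web"))
    if err != nil {
        panic(err)
    }

    fmt.Println(k)               // ["web"]
    fmt.Println(addrs.IntKey(3)) // [3]

    // Integer keys sort before string keys, matching InstanceKeyLess above.
    fmt.Println(addrs.InstanceKeyLess(addrs.IntKey(1), addrs.StringKey("a"))) // true
}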
+func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: v, + } +} + +// AbsLocalValue is the absolute address of a local value within a module instance. +type AbsLocalValue struct { + Module ModuleInstance + LocalValue LocalValue +} + +// LocalValue returns the absolute address of a local value of the given +// name within the receiving module instance. +func (m ModuleInstance) LocalValue(name string) AbsLocalValue { + return AbsLocalValue{ + Module: m, + LocalValue: LocalValue{ + Name: name, + }, + } +} + +func (v AbsLocalValue) String() string { + if len(v.Module) == 0 { + return v.LocalValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go new file mode 100644 index 000000000..1533f853c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module.go @@ -0,0 +1,51 @@ +package addrs + +import ( + "strings" +) + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string + +// IsRoot returns true if the receiver is the address of the root module, +// or false otherwise. +func (m Module) IsRoot() bool { + return len(m) == 0 +} + +func (m Module) String() string { + if len(m) == 0 { + return "" + } + return strings.Join([]string(m), ".") +} + +// Call returns the module call address that corresponds to the given module +// instance, along with the address of the module that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCall and then returns a slice of the receiever that excludes that +// last part. This is just a convenience for situations where a call address +// is required, such as when dealing with *Reference and Referencable values. +func (m Module) Call() (Module, ModuleCall) { + if len(m) == 0 { + panic("cannot produce ModuleCall for root module") + } + + caller, callName := m[:len(m)-1], m[len(m)-1] + return caller, ModuleCall{ + Name: callName, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go new file mode 100644 index 000000000..d138fade7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_call.go @@ -0,0 +1,63 @@ +package addrs + +import ( + "fmt" +) + +// ModuleCall is the address of a call from the current module to a child +// module. +// +// There is no "Abs" version of ModuleCall because an absolute module path +// is represented by ModuleInstance. +type ModuleCall struct { + referenceable + Name string +} + +func (c ModuleCall) String() string { + return "module." 
+ c.Name +} + +// ModuleCallInstance is the address of one instance of a module created from +// a module call, which might create multiple instances using "count" or +// "for_each" arguments. +type ModuleCallInstance struct { + referenceable + Call ModuleCall + Key InstanceKey +} + +func (c ModuleCallInstance) String() string { + if c.Key == NoKey { + return c.Call.String() + } + return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key) +} + +// ModuleInstance returns the address of the module instance that corresponds +// to the receiving call instance when resolved in the given calling module. +// In other words, it returns the child module instance that the receving +// call instance creates. +func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance { + return caller.Child(c.Call.Name, c.Key) +} + +// ModuleCallOutput is the address of a particular named output produced by +// an instance of a module call. +type ModuleCallOutput struct { + referenceable + Call ModuleCallInstance + Name string +} + +func (co ModuleCallOutput) String() string { + return fmt.Sprintf("%s.%s", co.Call.String(), co.Name) +} + +// AbsOutputValue returns the absolute output value address that corresponds +// to the receving module call output address, once resolved in the given +// calling module. +func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue { + moduleAddr := co.Call.ModuleInstance(caller) + return moduleAddr.OutputValue(co.Name) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go new file mode 100644 index 000000000..bb0901a26 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/module_instance.go @@ -0,0 +1,388 @@ +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. +type ModuleInstance []ModuleInstanceStep + +var ( + _ Targetable = ModuleInstance(nil) +) + +func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "A module instance address must begin with \"module.\".", + Subject: remain.SourceRange().Ptr(), + }) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance address", + Detail: "The module instance address is followed by additional invalid content.", + Subject: remain.SourceRange().Ptr(), + }) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. 
+// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := ParseModuleInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Module address prefix must be followed by dot and then a name.", + Subject: remain[0].SourceRange().Ptr(), + }) + break + } + + if next != "module" { + break + } + + kwRange := remain[0].SourceRange() + remain = remain[1:] + // If we have the prefix "module" then we should be followed by an + // module call name, as an attribute, and then optionally an index step + // giving the instance key. + if len(remain) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: &kwRange, + }) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Prefix \"module.\" must be followed by a module name.", + Subject: remain[0].SourceRange().Ptr(), + }) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = StringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = IntKey(idxInt) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: fmt.Sprintf("Invalid module index: %s.", err), + Subject: idx.SourceRange().Ptr(), + }) + } + default: + // Should never happen, because no other types are allowed in traversal indices. 
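A hedged sketch of ParseModuleInstanceStr round-tripping an illustrative module instance address (again only compilable as a test inside the SDK module, since the package is internal):

package addrs_test // hypothetical test file alongside the internal addrs package

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
)

func Example_parseModuleInstanceStr() {
    mi, diags := addrs.ParseModuleInstanceStr(`module.network[0].module.subnet["a"]`)
    if diags.HasErrors() {
        panic(diags.Err())
    }

    fmt.Println(mi.String())          // module.network[0].module.subnet["a"]
    fmt.Println(mi.Parent().String()) // module.network[0]
    fmt.Println(mi.IsRoot())          // false
}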
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address operator", + Detail: "Invalid module key: must be either a string or an integer.", + Subject: idx.SourceRange().Ptr(), + }) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey InstanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// IsRoot returns true if the receiver is the address of the root module instance, +// or false otherwise. +func (m ModuleInstance) IsRoot() bool { + return len(m) == 0 +} + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// Parent returns the address of the parent module instance of the receiver, or +// the receiver itself if there is no parent (if it's the root module address). +func (m ModuleInstance) Parent() ModuleInstance { + if len(m) == 0 { + return m + } + return m[:len(m)-1] +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. +// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} + +// Less returns true if the receiver should sort before the given other value +// in a sorted list of addresses. +func (m ModuleInstance) Less(o ModuleInstance) bool { + if len(m) != len(o) { + // Shorter path sorts first. 
+ return len(m) < len(o) + } + + for i := range m { + mS, oS := m[i], o[i] + switch { + case mS.Name != oS.Name: + return mS.Name < oS.Name + case mS.InstanceKey != oS.InstanceKey: + return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey) + } + } + + return false +} + +// Ancestors returns a slice containing the receiver and all of its ancestor +// module instances, all the way up to (and including) the root module. +// The result is ordered by depth, with the root module always first. +// +// Since the result always includes the root module, a caller may choose to +// ignore it by slicing the result with [1:]. +func (m ModuleInstance) Ancestors() []ModuleInstance { + ret := make([]ModuleInstance, 0, len(m)+1) + for i := 0; i <= len(m); i++ { + ret = append(ret, m[:i]) + } + return ret +} + +// Call returns the module call address that corresponds to the given module +// instance, along with the address of the module instance that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// A single module call can produce potentially many module instances, so the +// result discards any instance key that might be present on the last step +// of the instance. To retain this, use CallInstance instead. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCall and then returns a slice of the receiever that excludes that +// last part. This is just a convenience for situations where a call address +// is required, such as when dealing with *Reference and Referencable values. +func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) { + if len(m) == 0 { + panic("cannot produce ModuleCall for root module") + } + + inst, lastStep := m[:len(m)-1], m[len(m)-1] + return inst, ModuleCall{ + Name: lastStep.Name, + } +} + +// CallInstance returns the module call instance address that corresponds to +// the given module instance, along with the address of the module instance +// that contains it. +// +// There is no call for the root module, so this method will panic if called +// on the root module address. +// +// In practice, this just turns the last element of the receiver into a +// ModuleCallInstance and then returns a slice of the receiever that excludes +// that last part. This is just a convenience for situations where a call\ +// address is required, such as when dealing with *Reference and Referencable +// values. +func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) { + if len(m) == 0 { + panic("cannot produce ModuleCallInstance for root module") + } + + inst, lastStep := m[:len(m)-1], m[len(m)-1] + return inst, ModuleCallInstance{ + Call: ModuleCall{ + Name: lastStep.Name, + }, + Key: lastStep.InstanceKey, + } +} + +// TargetContains implements Targetable by returning true if the given other +// address either matches the receiver, is a sub-module-instance of the +// receiver, or is a targetable absolute address within a module that +// is contained within the reciever. +func (m ModuleInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case ModuleInstance: + if len(to) < len(m) { + // Can't be contained if the path is shorter + return false + } + // Other is contained if its steps match for the length of our own path. + for i, ourStep := range m { + otherStep := to[i] + if ourStep != otherStep { + return false + } + } + // If we fall out here then the prefixed matched, so it's contained. 
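A small sketch of building and comparing module instance addresses with the methods above (illustrative names and keys; the package is internal, so this only compiles inside the SDK module):

package addrs_test // hypothetical test file alongside the internal addrs package

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
)

func Example_moduleInstanceTree() {
    network := addrs.RootModuleInstance.Child("network", addrs.IntKey(0))
    subnet := network.Child("subnet", addrs.StringKey("a"))

    fmt.Println(subnet.String())                // module.network[0].module.subnet["a"]
    fmt.Println(network.TargetContains(subnet)) // true: subnet lives inside network
    fmt.Println(len(subnet.Ancestors()))        // 3: root, network, subnet
}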
+ return true + + case AbsResource: + return m.TargetContains(to.Module) + + case AbsResourceInstance: + return m.TargetContains(to.Module) + + default: + return false + } +} + +func (m ModuleInstance) targetableSigil() { + // ModuleInstance is targetable +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go new file mode 100644 index 000000000..bcd923acb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/output_value.go @@ -0,0 +1,75 @@ +package addrs + +import ( + "fmt" +) + +// OutputValue is the address of an output value, in the context of the module +// that is defining it. +// +// This is related to but separate from ModuleCallOutput, which represents +// a module output from the perspective of its parent module. Since output +// values cannot be represented from the module where they are defined, +// OutputValue is not Referenceable, while ModuleCallOutput is. +type OutputValue struct { + Name string +} + +func (v OutputValue) String() string { + return "output." + v.Name +} + +// Absolute converts the receiver into an absolute address within the given +// module instance. +func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: v, + } +} + +// AbsOutputValue is the absolute address of an output value within a module instance. +// +// This represents an output globally within the namespace of a particular +// configuration. It is related to but separate from ModuleCallOutput, which +// represents a module output from the perspective of its parent module. +type AbsOutputValue struct { + Module ModuleInstance + OutputValue OutputValue +} + +// OutputValue returns the absolute address of an output value of the given +// name within the receiving module instance. +func (m ModuleInstance) OutputValue(name string) AbsOutputValue { + return AbsOutputValue{ + Module: m, + OutputValue: OutputValue{ + Name: name, + }, + } +} + +func (v AbsOutputValue) String() string { + if v.Module.IsRoot() { + return v.OutputValue.String() + } + return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String()) +} + +// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput, +// returning also the module instance that the ModuleCallOutput is relative +// to. +// +// The root module does not have a call, and so this method cannot be used +// with outputs in the root module, and will panic in that case. +func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) { + if v.Module.IsRoot() { + panic("ReferenceFromCall used with root module output") + } + + caller, call := v.Module.CallInstance() + return caller, ModuleCallOutput{ + Call: call, + Name: v.OutputValue.Name, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go new file mode 100644 index 000000000..a2ee16441 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_ref.go @@ -0,0 +1,346 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Reference describes a reference to an address with source location +// information. 
+type Reference struct { + Subject Referenceable + SourceRange tfdiags.SourceRange + Remaining hcl.Traversal +} + +// ParseRef attempts to extract a referencable address from the prefix of the +// given traversal, which must be an absolute traversal or this function +// will panic. +// +// If no error diagnostics are returned, the returned reference includes the +// address that was extracted, the source range it was extracted from, and any +// remaining relative traversal that was not consumed as part of the +// reference. +// +// If error diagnostics are returned then the Reference value is invalid and +// must not be used. +func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + ref, diags := parseRef(traversal) + + // Normalize a little to make life easier for callers. + if ref != nil { + if len(ref.Remaining) == 0 { + ref.Remaining = nil + } + } + + return ref, diags +} + +// ParseRefStr is a helper wrapper around ParseRef that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseRef. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned reference may be nil or incomplete. +func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + ref, targetDiags := ParseRef(traversal) + diags = diags.Append(targetDiags) + return ref, diags +} + +func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + switch root { + + case "count": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: CountAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "each": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: ForEachAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "data": + if len(traversal) < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object must be followed by two attribute names: the data source type and the resource name.`, + Subject: traversal.SourceRange().Ptr(), + }) + return nil, diags + } + remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser + return parseResourceRef(DataResourceMode, rootRange, remain) + + case "local": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: LocalValue{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "module": + callName, callRange, remain, diags := 
parseSingleAttrRef(traversal) + if diags.HasErrors() { + return nil, diags + } + + // A traversal starting with "module" can either be a reference to + // an entire module instance or to a single output from a module + // instance, depending on what we find after this introducer. + + callInstance := ModuleCallInstance{ + Call: ModuleCall{ + Name: callName, + }, + Key: NoKey, + } + + if len(remain) == 0 { + // Reference to an entire module instance. Might alternatively + // be a reference to a collection of instances of a particular + // module, but the caller will need to deal with that ambiguity + // since we don't have enough context here. + return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(callRange), + Remaining: remain, + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + callInstance.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for module instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + + if len(remain) == 0 { + // Also a reference to an entire module instance, but we have a key + // now. + return &Reference{ + Subject: callInstance, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)), + Remaining: remain, + }, diags + } + } + + if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok { + remain = remain[1:] + return &Reference{ + Subject: ModuleCallOutput{ + Name: attrTrav.Name, + Call: callInstance, + }, + SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)), + Remaining: remain, + }, diags + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: "Module instance objects do not support this operation.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + + case "path": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: PathAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "self": + return &Reference{ + Subject: Self, + SourceRange: tfdiags.SourceRangeFromHCL(rootRange), + Remaining: traversal[1:], + }, diags + + case "terraform": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: TerraformAttr{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + case "var": + name, rng, remain, diags := parseSingleAttrRef(traversal) + return &Reference{ + Subject: InputVariable{Name: name}, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags + + default: + return parseResourceRef(ManagedResourceMode, rootRange, traversal) + } +} + +func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`, + Subject: hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := traversal[0].(type) { // Could be either root or attr, depending on 
our resource mode + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + // If it isn't a TraverseRoot then it must be a "data" reference. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: `The "data" object does not support this operation.`, + Subject: traversal[0].SourceRange().Ptr(), + }) + return nil, diags + } + + attrTrav, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + var what string + switch mode { + case DataResourceMode: + what = "data source" + default: + what = "resource type" + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what), + Subject: traversal[1].SourceRange().Ptr(), + }) + return nil, diags + } + name = attrTrav.Name + rng := hcl.RangeBetween(startRange, attrTrav.SrcRange) + remain := traversal[2:] + + resourceAddr := Resource{ + Mode: mode, + Type: typeName, + Name: name, + } + resourceInstAddr := ResourceInstance{ + Resource: resourceAddr, + Key: NoKey, + } + + if len(remain) == 0 { + // This might actually be a reference to the collection of all instances + // of the resource, but we don't have enough context here to decide + // so we'll let the caller resolve that ambiguity. + return &Reference{ + Subject: resourceAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + }, diags + } + + if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok { + var err error + resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid index key", + Detail: fmt.Sprintf("Invalid index for resource instance: %s.", err), + Subject: &idxTrav.SrcRange, + }) + return nil, diags + } + remain = remain[1:] + rng = hcl.RangeBetween(rng, idxTrav.SrcRange) + } + + return &Reference{ + Subject: resourceInstAddr, + SourceRange: tfdiags.SourceRangeFromHCL(rng), + Remaining: remain, + }, diags +} + +func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + root := traversal.RootName() + rootRange := traversal[0].SourceRange() + + if len(traversal) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object cannot be accessed directly. 
Instead, access one of its attributes.", root), + Subject: &rootRange, + }) + return "", hcl.Range{}, nil, diags + } + if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok { + return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid reference", + Detail: fmt.Sprintf("The %q object does not support this operation.", root), + Subject: traversal[1].SourceRange().Ptr(), + }) + return "", hcl.Range{}, nil, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go new file mode 100644 index 000000000..5b922e8b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/parse_target.go @@ -0,0 +1,240 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Target describes a targeted address with source location information. +type Target struct { + Subject Targetable + SourceRange tfdiags.SourceRange +} + +// ParseTarget attempts to interpret the given traversal as a targetable +// address. The given traversal must be absolute, or this function will +// panic. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the Target value is invalid and +// must not be used. +func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) { + path, remain, diags := parseModuleInstancePrefix(traversal) + if diags.HasErrors() { + return nil, diags + } + + rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange()) + + if len(remain) == 0 { + return &Target{ + Subject: path, + SourceRange: rng, + }, diags + } + + mode := ManagedResourceMode + if remain.RootName() == "data" { + mode = DataResourceMode + remain = remain[1:] + } + + if len(remain) < 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource specification must include a resource type and name.", + Subject: remain.SourceRange().Ptr(), + }) + return nil, diags + } + + var typeName, name string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + typeName = tt.Name + case hcl.TraverseAttr: + typeName = tt.Name + default: + switch mode { + case ManagedResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource type name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + case DataResourceMode: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A data source name is required.", + Subject: remain[0].SourceRange().Ptr(), + }) + default: + panic("unknown mode") + } + return nil, diags + } + + switch tt := remain[1].(type) { + case hcl.TraverseAttr: + name = tt.Name + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource name is required.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + var subject Targetable + remain = remain[2:] + switch len(remain) { + case 0: + subject = path.Resource(mode, typeName, name) + case 1: + if tt, ok := remain[0].(hcl.TraverseIndex); ok { + key, err := 
ParseInstanceKey(tt.Key) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: fmt.Sprintf("Invalid resource instance key: %s.", err), + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + + subject = path.ResourceInstance(mode, typeName, name, key) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Resource instance key must be given in square brackets.", + Subject: remain[0].SourceRange().Ptr(), + }) + return nil, diags + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "Unexpected extra operators after address.", + Subject: remain[1].SourceRange().Ptr(), + }) + return nil, diags + } + + return &Target{ + Subject: subject, + SourceRange: rng, + }, diags +} + +// ParseTargetStr is a helper wrapper around ParseTarget that takes a string +// and parses it with the HCL native syntax traversal parser before +// interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a target string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseTarget. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned target may be nil or incomplete. +func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return nil, diags + } + + target, targetDiags := ParseTarget(traversal) + diags = diags.Append(targetDiags) + return target, diags +} + +// ParseAbsResourceInstance attempts to interpret the given traversal as an +// absolute resource instance address, using the same syntax as expected by +// ParseTarget. +// +// If no error diagnostics are returned, the returned target includes the +// address that was extracted and the source range it was extracted from. +// +// If error diagnostics are returned then the AbsResource value is invalid and +// must not be used. +func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) { + addr, diags := ParseTarget(traversal) + if diags.HasErrors() { + return AbsResourceInstance{}, diags + } + + switch tt := addr.Subject.(type) { + + case AbsResource: + return tt.Instance(NoKey), diags + + case AbsResourceInstance: + return tt, diags + + case ModuleInstance: // Catch likely user error with specialized message + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource instance address is required here. 
The module path must be followed by a resource instance specification.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + default: // Generic message for other address types + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid address", + Detail: "A resource address is required here.", + Subject: traversal.SourceRange().Ptr(), + }) + return AbsResourceInstance{}, diags + + } +} + +// ParseAbsResourceInstanceStr is a helper wrapper around +// ParseAbsResourceInstance that takes a string and parses it with the HCL +// native syntax traversal parser before interpreting it. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address may be incomplete. +// +// Since this function has no context about the source of the given string, +// any returned diagnostics will not have meaningful source location +// information. +func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsResourceInstance{}, diags + } + + addr, addrDiags := ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go new file mode 100644 index 000000000..cfc13f4bc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/path_attr.go @@ -0,0 +1,12 @@ +package addrs + +// PathAttr is the address of an attribute of the "path" object in +// the interpolation scope, like "path.module". +type PathAttr struct { + referenceable + Name string +} + +func (pa PathAttr) String() string { + return "path." + pa.Name +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go new file mode 100644 index 000000000..c6fce1a50 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_config.go @@ -0,0 +1,289 @@ +package addrs + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// ProviderConfig is the address of a provider configuration. +type ProviderConfig struct { + Type string + + // If not empty, Alias identifies which non-default (aliased) provider + // configuration this address refers to. + Alias string +} + +// ParseProviderConfigCompact parses the given absolute traversal as a relative +// provider address in compact form. The following are examples of traversals +// that can be successfully parsed as compact relative provider configuration +// addresses: +// +// aws +// aws.foo +// +// This function will panic if given a relative traversal. +// +// If the returned diagnostics contains errors then the result value is invalid +// and must not be used. 
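Referring back to ParseTargetStr and ParseAbsResourceInstanceStr above, a hedged sketch with illustrative resource addresses (internal package, so only compilable inside the SDK module):

package addrs_test // hypothetical test file alongside the internal addrs package

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/internal/addrs"
)

func Example_parseResourceAddresses() {
    target, diags := addrs.ParseTargetStr(`module.network[0].aws_subnet.private["a"]`)
    if diags.HasErrors() {
        panic(diags.Err())
    }
    // target.Subject is a resource instance address inside module.network[0].
    fmt.Println(target.Subject)

    inst, diags := addrs.ParseAbsResourceInstanceStr("aws_instance.web[0]")
    if diags.HasErrors() {
        panic(diags.Err())
    }
    fmt.Println(inst.String()) // aws_instance.web[0]
}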
+func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := ProviderConfig{ + Type: traversal.RootName(), + } + + if len(traversal) < 2 { + // Just a type name, then. + return ret, diags + } + + aliasStep := traversal[1] + switch ts := aliasStep.(type) { + case hcl.TraverseAttr: + ret.Alias = ts.Name + return ret, diags + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.", + Subject: aliasStep.SourceRange().Ptr(), + }) + } + + if len(traversal) > 2 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous extra operators after provider configuration address.", + Subject: traversal[2:].SourceRange().Ptr(), + }) + } + + return ret, diags +} + +// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return ProviderConfig{}, diags + } + + addr, addrDiags := ParseProviderConfigCompact(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// Absolute returns an AbsProviderConfig from the receiver and the given module +// instance address. +func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig { + return AbsProviderConfig{ + Module: module, + ProviderConfig: pc, + } +} + +func (pc ProviderConfig) String() string { + if pc.Type == "" { + // Should never happen; always indicates a bug + return "provider." + } + + if pc.Alias != "" { + return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias) + } + + return "provider." + pc.Type +} + +// StringCompact is an alternative to String that returns the form that can +// be parsed by ParseProviderConfigCompact, without the "provider." prefix. +func (pc ProviderConfig) StringCompact() string { + if pc.Alias != "" { + return fmt.Sprintf("%s.%s", pc.Type, pc.Alias) + } + return pc.Type +} + +// AbsProviderConfig is the absolute address of a provider configuration +// within a particular module instance. +type AbsProviderConfig struct { + Module ModuleInstance + ProviderConfig ProviderConfig +} + +// ParseAbsProviderConfig parses the given traversal as an absolute provider +// address. 
The following are examples of traversals that can be successfully +// parsed as absolute provider configuration addresses: +// +// provider.aws +// provider.aws.foo +// module.bar.provider.aws +// module.bar.module.baz.provider.aws.foo +// module.foo[1].provider.aws.foo +// +// This type of address is used, for example, to record the relationships +// between resources and provider configurations in the state structure. +// This type of address is not generally used in the UI, except in error +// messages that refer to provider configurations. +func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) { + modInst, remain, diags := parseModuleInstancePrefix(traversal) + ret := AbsProviderConfig{ + Module: modInst, + } + if len(remain) < 2 || remain.RootName() != "provider" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider address must begin with \"provider.\", followed by a provider type name.", + Subject: remain.SourceRange().Ptr(), + }) + return ret, diags + } + if len(remain) > 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Extraneous operators after provider configuration alias.", + Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(), + }) + return ret, diags + } + + if tt, ok := remain[1].(hcl.TraverseAttr); ok { + ret.ProviderConfig.Type = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "The prefix \"provider.\" must be followed by a provider type name.", + Subject: remain[1].SourceRange().Ptr(), + }) + return ret, diags + } + + if len(remain) == 3 { + if tt, ok := remain[2].(hcl.TraverseAttr); ok { + ret.ProviderConfig.Alias = tt.Name + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration address", + Detail: "Provider type name must be followed by a configuration alias name.", + Subject: remain[2].SourceRange().Ptr(), + }) + return ret, diags + } + } + + return ret, diags +} + +// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseAbsProviderConfig. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// the returned address is invalid. 
+func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return AbsProviderConfig{}, diags + } + + addr, addrDiags := ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags +} + +// ProviderConfigDefault returns the address of the default provider config +// of the given type inside the recieving module instance. +func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig { + return AbsProviderConfig{ + Module: m, + ProviderConfig: ProviderConfig{ + Type: name, + }, + } +} + +// ProviderConfigAliased returns the address of an aliased provider config +// of with given type and alias inside the recieving module instance. +func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig { + return AbsProviderConfig{ + Module: m, + ProviderConfig: ProviderConfig{ + Type: name, + Alias: alias, + }, + } +} + +// Inherited returns an address that the receiving configuration address might +// inherit from in a parent module. The second bool return value indicates if +// such inheritance is possible, and thus whether the returned address is valid. +// +// Inheritance is possible only for default (un-aliased) providers in modules +// other than the root module. Even if a valid address is returned, inheritence +// may not be performed for other reasons, such as if the calling module +// provided explicit provider configurations within the call for this module. +// The ProviderTransformer graph transform in the main terraform module has +// the authoritative logic for provider inheritance, and this method is here +// mainly just for its benefit. +func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) { + // Can't inherit if we're already in the root. + if len(pc.Module) == 0 { + return AbsProviderConfig{}, false + } + + // Can't inherit if we have an alias. + if pc.ProviderConfig.Alias != "" { + return AbsProviderConfig{}, false + } + + // Otherwise, we might inherit from a configuration with the same + // provider name in the parent module instance. + parentMod := pc.Module.Parent() + return pc.ProviderConfig.Absolute(parentMod), true +} + +func (pc AbsProviderConfig) String() string { + if len(pc.Module) == 0 { + return pc.ProviderConfig.String() + } + return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go new file mode 100644 index 000000000..64b8ac869 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/provider_type.go @@ -0,0 +1,7 @@ +package addrs + +// ProviderType encapsulates a single provider type. 
In the future this will be +// extended to include additional fields including Namespace and SourceHost +type ProviderType struct { + Name string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go new file mode 100644 index 000000000..211083a5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/referenceable.go @@ -0,0 +1,20 @@ +package addrs + +// Referenceable is an interface implemented by all address types that can +// appear as references in configuration language expressions. +type Referenceable interface { + // All implementations of this interface must be covered by the type switch + // in lang.Scope.buildEvalContext. + referenceableSigil() + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseRef to produce an identical + // result. + String() string +} + +type referenceable struct { +} + +func (r referenceable) referenceableSigil() { +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go new file mode 100644 index 000000000..103f8a28c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource.go @@ -0,0 +1,254 @@ +package addrs + +import ( + "fmt" + "strings" +) + +// Resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type Resource struct { + referenceable + Mode ResourceMode + Type string + Name string +} + +func (r Resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +// Instance produces the address for a specific instance of the receiver +// that is idenfied by the given key. +func (r Resource) Instance(key InstanceKey) ResourceInstance { + return ResourceInstance{ + Resource: r, + Key: key, + } +} + +// Absolute returns an AbsResource from the receiver and the given module +// instance address. +func (r Resource) Absolute(module ModuleInstance) AbsResource { + return AbsResource{ + Module: module, + Resource: r, + } +} + +// DefaultProviderConfig returns the address of the provider configuration +// that should be used for the resource identified by the reciever if it +// does not have a provider configuration address explicitly set in +// configuration. +// +// This method is not able to verify that such a configuration exists, nor +// represent the behavior of automatically inheriting certain provider +// configurations from parent modules. It just does a static analysis of the +// receiving address and returns an address to start from, relative to the +// same module that contains the resource. +func (r Resource) DefaultProviderConfig() ProviderConfig { + typeName := r.Type + if under := strings.Index(typeName, "_"); under != -1 { + typeName = typeName[:under] + } + return ProviderConfig{ + Type: typeName, + } +} + +// ResourceInstance is an address for a specific instance of a resource. 
+// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type ResourceInstance struct { + referenceable + Resource Resource + Key InstanceKey +} + +func (r ResourceInstance) ContainingResource() Resource { + return r.Resource +} + +func (r ResourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +// Absolute returns an AbsResourceInstance from the receiver and the given module +// instance address. +func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { + return AbsResourceInstance{ + Module: module, + Resource: r, + } +} + +// AbsResource is an absolute address for a resource under a given module path. +type AbsResource struct { + targetable + Module ModuleInstance + Resource Resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { + return AbsResource{ + Module: m, + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +// Instance produces the address for a specific instance of the receiver +// that is idenfied by the given key. +func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: r.Module, + Resource: r.Resource.Instance(key), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is either equal to the receiver or is an instance of the +// receiver. +func (r AbsResource) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResource: + // We'll use our stringification as a cheat-ish way to test for equality. + return to.String() == r.String() + + case AbsResourceInstance: + return r.TargetContains(to.ContainingResource()) + + default: + return false + + } +} + +func (r AbsResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// AbsResourceInstance is an absolute address for a resource instance under a +// given module path. +type AbsResourceInstance struct { + targetable + Module ModuleInstance + Resource ResourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: m, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receving resource instance. In other words, it discards the key portion +// of the address to produce an AbsResource value. +func (r AbsResourceInstance) ContainingResource() AbsResource { + return AbsResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is equal to the receiver. +func (r AbsResourceInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResourceInstance: + // We'll use our stringification as a cheat-ish way to test for equality. 
+ return to.String() == r.String() + + default: + return false + + } +} + +func (r AbsResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// Less returns true if the receiver should sort before the given other value +// in a sorted list of addresses. +func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { + switch { + + case len(r.Module) != len(o.Module): + return len(r.Module) < len(o.Module) + + case r.Module.String() != o.Module.String(): + return r.Module.Less(o.Module) + + case r.Resource.Resource.Mode != o.Resource.Resource.Mode: + return r.Resource.Resource.Mode == DataResourceMode + + case r.Resource.Resource.Type != o.Resource.Resource.Type: + return r.Resource.Resource.Type < o.Resource.Resource.Type + + case r.Resource.Resource.Name != o.Resource.Resource.Name: + return r.Resource.Resource.Name < o.Resource.Resource.Name + + case r.Resource.Key != o.Resource.Key: + return InstanceKeyLess(r.Resource.Key, o.Resource.Key) + + default: + return false + + } +} + +// ResourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type ResourceMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode ResourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode ResourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode ResourceMode = 'D' +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go new file mode 100644 index 000000000..9bdbdc421 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resource_phase.go @@ -0,0 +1,105 @@ +package addrs + +import "fmt" + +// ResourceInstancePhase is a special kind of reference used only internally +// during graph building to represent resource instances that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via an instance phase +// or can declare that they reference an instance phase in order to accomodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourceInstancePhase struct { + referenceable + ResourceInstance ResourceInstance + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourceInstancePhase{} + +// Phase returns a special "phase address" for the receving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { + return ResourceInstancePhase{ + ResourceInstance: r, + Phase: rpt, + } +} + +// ContainingResource returns an address for the same phase of the resource +// that this instance belongs to. 
+func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { + return rp.ResourceInstance.Resource.Phase(rp.Phase) +} + +func (rp ResourceInstancePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) +} + +// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase. +type ResourceInstancePhaseType string + +const ( + // ResourceInstancePhaseDestroy represents the "destroy" phase of a + // resource instance. + ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" + + // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy + // but is used for resources that have "create_before_destroy" set, thus + // requiring a different dependency ordering. + ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" +) + +func (rpt ResourceInstancePhaseType) String() string { + return string(rpt) +} + +// ResourcePhase is a special kind of reference used only internally +// during graph building to represent resources that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via a resource phase +// or can declare that they reference a resource phase in order to accomodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// Since resources (as opposed to instances) aren't actually phased, this +// address type is used only as an approximation during initial construction +// of the resource-oriented plan graph, under the assumption that resource +// instances with ResourceInstancePhase addresses will be created in dynamic +// subgraphs during the graph walk. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourcePhase struct { + referenceable + Resource Resource + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourcePhase{} + +// Phase returns a special "phase address" for the receving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { + return ResourcePhase{ + Resource: r, + Phase: rpt, + } +} + +func (rp ResourcePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go new file mode 100644 index 000000000..0b5c33f8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _ResourceMode_name_0 = "InvalidResourceMode" + _ResourceMode_name_1 = "DataResourceMode" + _ResourceMode_name_2 = "ManagedResourceMode" +) + +func (i ResourceMode) String() string { + switch { + case i == 0: + return _ResourceMode_name_0 + case i == 68: + return _ResourceMode_name_1 + case i == 77: + return _ResourceMode_name_2 + default: + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go new file mode 100644 index 000000000..7f24eaf08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/self.go @@ -0,0 +1,14 @@ +package addrs + +// Self is the address of the special object "self" that behaves as an alias +// for a containing object currently in scope. +const Self selfT = 0 + +type selfT int + +func (s selfT) referenceableSigil() { +} + +func (s selfT) String() string { + return "self" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go new file mode 100644 index 000000000..16819a5af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/targetable.go @@ -0,0 +1,26 @@ +package addrs + +// Targetable is an interface implemented by all address types that can be +// used as "targets" for selecting sub-graphs of a graph. +type Targetable interface { + targetableSigil() + + // TargetContains returns true if the receiver is considered to contain + // the given other address. Containment, for the purpose of targeting, + // means that if a container address is targeted then all of the + // addresses within it are also implicitly targeted. + // + // A targetable address always contains at least itself. + TargetContains(other Targetable) bool + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseTarget to produce an + // identical result. + String() string +} + +type targetable struct { +} + +func (r targetable) targetableSigil() { +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go new file mode 100644 index 000000000..a880182ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/addrs/terraform_attr.go @@ -0,0 +1,12 @@ +package addrs + +// TerraformAttr is the address of an attribute of the "terraform" object in +// the interpolation scope, like "terraform.workspace". +type TerraformAttr struct { + referenceable + Name string +} + +func (ta TerraformAttr) String() string { + return "terraform." 
+ ta.Name +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go new file mode 100644 index 000000000..c054acf0a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diagnostic.go @@ -0,0 +1,295 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcled" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + case tfdiags.Warning: + buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) + + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. + snippetRange = hcl.RangeOver(snippetRange, highlightRange) + if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + var src []byte + if sources != nil { + src = sources[snippetRange.Filename] + } + if src == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. 
+ fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) + } else { + file, offset := parseRange(src, highlightRange) + + headerRange := highlightRange + + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + contextStr = ", in " + contextStr + } + + fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) + + // Config snippet rendering + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snippetRange) { + continue + } + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), + lineRange.Start.Line, + before, highlighted, after, + ) + } + + } + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue Traversals + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) + } + seen[traversalStr] = struct{}{} + } + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? + + if len(stmts) > 0 { + fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) + } + for _, stmt := range stmts { + fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) + } + } + + buf.WriteByte('\n') + } + + if desc.Detail != "" { + detail := desc.Detail + if width != 0 { + detail = wordwrap.WrapString(detail, uint(width)) + } + fmt.Fprintf(&buf, "%s\n", detail) + } + + return buf.String() +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. 
+ parser := hclparse.NewParser() + var file *hcl.File + var diags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, diags = parser.ParseJSON(src, filename) + } else { + file, diags = parser.ParseHCL(src, filename) + } + if diags.HasErrors() { + return file, offset + } + + return file, offset +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go new file mode 100644 index 000000000..0a2aa7d02 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/diff.go @@ -0,0 +1,1192 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ResourceChange returns a string representation of a change to a particular +// resource, for inclusion in user-facing plan output. +// +// The resource schema must be provided along with the change so that the +// formatted change can reflect the configuration structure for the associated +// resource. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. 
+func ResourceChange( + change *plans.ResourceInstanceChangeSrc, + tainted bool, + schema *configschema.Block, + color *colorstring.Colorize, +) string { + addr := change.Addr + var buf bytes.Buffer + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: false, + } + } + + dispAddr := addr.String() + if change.DeposedKey != states.NotDeposed { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) + } + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) + case plans.Read: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) + case plans.Update: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) + case plans.CreateThenDelete, plans.DeleteThenCreate: + if tainted { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) + } else { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) + } + case plans.Delete: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString(color.Color("[reset]\n")) + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color("[green] +[reset] ")) + case plans.Read: + buf.WriteString(color.Color("[cyan] <=[reset] ")) + case plans.Update: + buf.WriteString(color.Color("[yellow] ~[reset] ")) + case plans.DeleteThenCreate: + buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) + case plans.CreateThenDelete: + buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) + case plans.Delete: + buf.WriteString(color.Color("[red] -[reset] ")) + default: + buf.WriteString(color.Color("??? ")) + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + buf.WriteString(fmt.Sprintf( + "resource %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + case addrs.DataResourceMode: + buf.WriteString(fmt.Sprintf( + "data %q %q ", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + default: + // should never happen, since the above is exhaustive + buf.WriteString(addr.String()) + } + + buf.WriteString(" {") + + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: change.Action, + requiredReplace: change.RequiredReplace, + } + + // Most commonly-used resources have nested blocks that result in us + // going at least three traversals deep while we recurse here, so we'll + // start with that much capacity and then grow as needed for deeper + // structures. + path := make(cty.Path, 0, 3) + + changeV, err := change.Decode(schema.ImpliedType()) + if err != nil { + // Should never happen in here, since we've already been through + // loads of layers of encode/decode of the planned changes before now. + panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) + } + + // We currently have an opt-out that permits the legacy SDK to return values + // that defy our usual conventions around handling of nesting blocks. 
To + // avoid the rendering code from needing to handle all of these, we'll + // normalize first. + // (Ideally we'd do this as part of the SDK opt-out implementation in core, + // but we've added it here for now to reduce risk of unexpected impacts + // on other code in core.) + changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) + changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) + + bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) + if bodyWritten { + buf.WriteString("\n") + buf.WriteString(strings.Repeat(" ", 4)) + } + buf.WriteString("}\n") + + return buf.String() +} + +type blockBodyDiffPrinter struct { + buf *bytes.Buffer + color *colorstring.Colorize + action plans.Action + requiredReplace cty.PathSet +} + +const forcesNewResourceCaption = " [red]# forces replacement[reset]" + +// writeBlockBodyDiff writes attribute or block differences +// and returns true if any differences were found and written +func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { + path = ctyEnsurePathCapacity(path, 1) + + bodyWritten := false + blankBeforeBlocks := false + { + attrNames := make([]string, 0, len(schema.Attributes)) + attrNameLen := 0 + for name := range schema.Attributes { + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + if oldVal.IsNull() && newVal.IsNull() { + // Skip attributes where both old and new values are null + // (we do this early here so that we'll do our value alignment + // based on the longest attribute name that has a change, rather + // than the longest attribute name in the full set.) + continue + } + + attrNames = append(attrNames, name) + if len(name) > attrNameLen { + attrNameLen = len(name) + } + } + sort.Strings(attrNames) + if len(attrNames) > 0 { + blankBeforeBlocks = true + } + + for _, name := range attrNames { + attrS := schema.Attributes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) + } + } + + { + blockTypeNames := make([]string, 0, len(schema.BlockTypes)) + for name := range schema.BlockTypes { + blockTypeNames = append(blockTypeNames, name) + } + sort.Strings(blockTypeNames) + + for _, name := range blockTypeNames { + blockS := schema.BlockTypes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) + + // Always include a blank for any subsequent block types. 
+ blankBeforeBlocks = true + } + } + + return bodyWritten +} + +func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) { + path = append(path, cty.GetAttrStep{Name: name}) + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + showJustNew := false + var action plans.Action + switch { + case old.IsNull(): + action = plans.Create + showJustNew = true + case new.IsNull(): + action = plans.Delete + case ctyEqualWithUnknown(old, new): + action = plans.NoOp + showJustNew = true + default: + action = plans.Update + } + + p.writeActionSymbol(action) + + p.buf.WriteString(p.color.Color("[bold]")) + p.buf.WriteString(name) + p.buf.WriteString(p.color.Color("[reset]")) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(name))) + p.buf.WriteString(" = ") + + if attrS.Sensitive { + p.buf.WriteString("(sensitive value)") + } else { + switch { + case showJustNew: + p.writeValue(new, action, indent+2) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + default: + // We show new even if it is null to emphasize the fact + // that it is being unset, since otherwise it is easy to + // misunderstand that the value is still set to the old value. + p.writeValueDiff(old, new, indent+2, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) { + path = append(path, cty.GetAttrStep{Name: name}) + if old.IsNull() && new.IsNull() { + // Nothing to do if both old and new is null + return + } + + // Where old/new are collections representing a nesting mode other than + // NestingSingle, we assume the collection value can never be unknown + // since we always produce the container for the nested objects, even if + // the objects within are computed. + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + var action plans.Action + eqV := new.Equals(old) + switch { + case old.IsNull(): + action = plans.Create + case new.IsNull(): + action = plans.Delete + case !new.IsWhollyKnown() || !old.IsWhollyKnown(): + // "old" should actually always be known due to our contract + // that old values must never be unknown, but we'll allow it + // anyway to be robust. + action = plans.Update + case !eqV.IsKnown() || !eqV.True(): + action = plans.Update + } + + if blankBefore { + p.buf.WriteRune('\n') + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path) + case configschema.NestingList: + // For the sake of handling nested blocks, we'll treat a null list + // the same as an empty list since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockListAsEmpty(old) + new = ctyNullBlockListAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + // Here we intentionally preserve the index-based correspondance + // between old and new, rather than trying to detect insertions + // and removals in the list, because this more accurately reflects + // how Terraform Core and providers will understand the change, + // particularly when the nested block contains computed attributes + // that will themselves maintain correspondance by index. + + // commonLen is number of elements that exist in both lists, which + // will be presented as updates (~). 
Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. + var commonLen int + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + default: + commonLen = len(newItems) + } + + if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { + p.buf.WriteRune('\n') + } + + for i := 0; i < commonLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := newItems[i] + action := plans.Update + if oldItem.RawEquals(newItem) { + action = plans.NoOp + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(oldItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := cty.NullVal(oldItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(newItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + newItem := newItems[i] + oldItem := cty.NullVal(newItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) + } + case configschema.NestingSet: + // For the sake of handling nested blocks, we'll treat a null set + // the same as an empty set since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockSetAsEmpty(old) + new = ctyNullBlockSetAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both sets are empty + return + } + + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) + all := cty.SetVal(allItems) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + path := append(path, cty.IndexStep{Key: val}) + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. 
+ old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + newItems := new.AsValueMap() + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both maps are empty + return + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + if label != nil { + fmt.Fprintf(p.buf, "%s %q {", name, *label) + } else { + fmt.Fprintf(p.buf, "%s {", name) + } + + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) + if bodyWritten { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + } + p.buf.WriteString("}") +} + +func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { + if !val.IsKnown() { + p.buf.WriteString("(known after apply)") + return + } + if val.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) + return + } + + ty := val.Type() + + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + { + // Special behavior for JSON strings containing array or object + src := []byte(val.AsString()) + ty, err := ctyjson.ImpliedType(src) + // check for the special case of "null", which decodes to nil, + // and just allow it to be printed out directly + if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { + jv, err := ctyjson.Unmarshal(src, ty) + if err == nil { + p.buf.WriteString("jsonencode(") + if jv.LengthInt() == 0 { + p.writeValue(jv, action, 0) + } else { + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(jv, action, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteByte(')') + break // don't *also* do the normal behavior below + } + } + } + fmt.Fprintf(p.buf, "%q", val.AsString()) + case cty.Bool: + if val.True() { + p.buf.WriteString("true") + } else { + p.buf.WriteString("false") + } + case cty.Number: + bf := val.AsBigFloat() + p.buf.WriteString(bf.Text('f', -1)) + default: + // should never happen, since the above is exhaustive + fmt.Fprintf(p.buf, "%#v", val) + } + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + p.buf.WriteString("[") + + it := val.ElementIterator() + for it.Next() { + _, val 
:= it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",") + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("]") + case ty.IsMapType(): + p.buf.WriteString("{") + + keyLen := 0 + for it := val.ElementIterator(); it.Next(); { + key, _ := it.Element() + if keyStr := key.AsString(); len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + for it := val.ElementIterator(); it.Next(); { + key, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(key, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + case ty.IsObjectType(): + p.buf.WriteString("{") + + atys := ty.AttributeTypes() + attrNames := make([]string, 0, len(atys)) + nameLen := 0 + for attrName := range atys { + attrNames = append(attrNames, attrName) + if len(attrName) > nameLen { + nameLen = len(attrName) + } + } + sort.Strings(attrNames) + + for _, attrName := range attrNames { + val := val.GetAttr(attrName) + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(attrName) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if len(attrNames) > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + } +} + +func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { + ty := old.Type() + typesEqual := ctyTypesEqual(ty, new.Type()) + + // We have some specialized diff implementations for certain complex + // values where it's useful to see a visualization of the diff of + // the nested elements rather than just showing the entire old and + // new values verbatim. + // However, these specialized implementations can apply only if both + // values are known and non-null. + if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { + switch { + case ty == cty.String: + // We have special behavior for both multi-line strings in general + // and for strings that can parse as JSON. For the JSON handling + // to apply, both old and new must be valid JSON. + // For single-line strings that don't parse as JSON we just fall + // out of this switch block and do the default old -> new rendering. + oldS := old.AsString() + newS := new.AsString() + + { + // Special behavior for JSON strings containing object or + // list values. 
+ oldBytes := []byte(oldS) + newBytes := []byte(newS) + oldType, oldErr := ctyjson.ImpliedType(oldBytes) + newType, newErr := ctyjson.ImpliedType(newBytes) + if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) { + oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType) + newJV, newErr := ctyjson.Unmarshal(newBytes, newType) + if oldErr == nil && newErr == nil { + if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace + p.buf.WriteString("jsonencode(") + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(plans.Update) + p.writeValueDiff(oldJV, newJV, indent+4, path) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } else { + // if they differ only in insigificant whitespace + // then we'll note that but still expand out the + // effective value. + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]")) + } else { + p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]")) + } + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(oldJV, plans.NoOp, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteByte(')') + } + return + } + } + } + + if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 { + break + } + + p.buf.WriteString("<<~EOT") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var oldLines, newLines []cty.Value + { + r := strings.NewReader(oldS) + sc := bufio.NewScanner(r) + for sc.Scan() { + oldLines = append(oldLines, cty.StringVal(sc.Text())) + } + } + { + r := strings.NewReader(newS) + sc := bufio.NewScanner(r) + for sc.Scan() { + newLines = append(newLines, cty.StringVal(sc.Text())) + } + } + + diffLines := ctySequenceDiff(oldLines, newLines) + for _, diffLine := range diffLines { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(diffLine.Action) + + switch diffLine.Action { + case plans.NoOp, plans.Delete: + p.buf.WriteString(diffLine.Before.AsString()) + case plans.Create: + p.buf.WriteString(diffLine.After.AsString()) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return for strings + p.buf.WriteString(diffLine.After.AsString()) + + } + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol + p.buf.WriteString("EOT") + + return + + case ty.IsSetType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var addedVals, removedVals, allVals []cty.Value + for it := old.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if new.HasElement(val).False() { + removedVals = append(removedVals, val) + } + } + for it := new.ElementIterator(); it.Next(); { + _, val := it.Element() + allVals = append(allVals, val) + if val.IsKnown() && old.HasElement(val).False() { + addedVals = append(addedVals, val) + } + } + + var all, added, removed cty.Value + if len(allVals) > 0 { + all = cty.SetVal(allVals) + } else { + all = cty.SetValEmpty(ty.ElementType()) + } + if len(addedVals) > 0 { + added = cty.SetVal(addedVals) + } else { + added = cty.SetValEmpty(ty.ElementType()) + } + 
if len(removedVals) > 0 { + removed = cty.SetVal(removedVals) + } else { + removed = cty.SetValEmpty(ty.ElementType()) + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + + var action plans.Action + switch { + case !val.IsKnown(): + action = plans.Update + case added.HasElement(val).True(): + action = plans.Create + case removed.HasElement(val).True(): + action = plans.Delete + default: + action = plans.NoOp + } + + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + case ty.IsListType() || ty.IsTupleType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) + for _, elemDiff := range elemDiffs { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(elemDiff.Action) + switch elemDiff.Action { + case plans.NoOp, plans.Delete: + p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) + case plans.Update: + p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) + case plans.Create: + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return. + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + } + + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + + case ty.IsMapType(): + p.buf.WriteString("{") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := cty.StringVal(k) + var action plans.Action + if old.HasIndex(kV).False() { + action = plans.Create + } else if new.HasIndex(kV).False() { + action = plans.Delete + } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.IndexStep{Key: kV}) + + p.writeActionSymbol(action) + p.writeValue(kV, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + switch action { + case plans.Create, plans.NoOp: + v := new.Index(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.Index(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.Index(kV) + newV := new.Index(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteByte('\n') + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + return + case ty.IsObjectType(): + p.buf.WriteString("{") + p.buf.WriteString("\n") + + forcesNewResource := 
p.pathForcesNewResource(path) + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := k + var action plans.Action + if !old.Type().HasAttribute(kV) { + action = plans.Create + } else if !new.Type().HasAttribute(kV) { + action = plans.Delete + } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.GetAttrStep{Name: kV}) + + p.writeActionSymbol(action) + p.buf.WriteString(k) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + + switch action { + case plans.Create, plans.NoOp: + v := new.GetAttr(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.GetAttr(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.GetAttr(kV) + newV := new.GetAttr(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + + if forcesNewResource { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + } + + // In all other cases, we just show the new and old values as-is + p.writeValue(old, plans.Delete, indent) + if new.IsNull() { + p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) + } else { + p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) + } + + p.writeValue(new, plans.Create, indent) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } +} + +// writeActionSymbol writes a symbol to represent the given action, followed +// by a space. +// +// It only supports the actions that can be represented with a single character: +// Create, Delete, Update and NoAction. +func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { + switch action { + case plans.Create: + p.buf.WriteString(p.color.Color("[green]+[reset] ")) + case plans.Delete: + p.buf.WriteString(p.color.Color("[red]-[reset] ")) + case plans.Update: + p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) + case plans.NoOp: + p.buf.WriteString(" ") + default: + // Should never happen + p.buf.WriteString(p.color.Color("? ")) + } +} + +func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { + if !p.action.IsReplace() { + // "requiredReplace" only applies when the instance is being replaced + return false + } + return p.requiredReplace.Has(path) +} + +func ctyEmptyString(value cty.Value) bool { + if !value.IsNull() && value.IsKnown() { + valueType := value.Type() + if valueType == cty.String && value.AsString() == "" { + return true + } + } + return false +} + +func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { + attrType := val.Type().AttributeType(name) + + if val.IsNull() { + return cty.NullVal(attrType) + } + + // We treat "" as null here + // as existing SDK doesn't support null yet. 
+ // This allows us to avoid spurious diffs + // until we introduce null to the SDK. + attrValue := val.GetAttr(name) + if ctyEmptyString(attrValue) { + return cty.NullVal(attrType) + } + + return attrValue +} + +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + ret := make([]cty.Value, 0, val.LengthInt()) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + return ret +} + +// ctySequenceDiff returns differences between given sequences of cty.Value(s) +// in the form of Create, Delete, or Update actions (for objects). +func ctySequenceDiff(old, new []cty.Value) []*plans.Change { + var ret []*plans.Change + lcs := objchange.LongestCommonSubsequence(old, new) + var oldI, newI, lcsI int + for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { + for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { + isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) + if isObjectDiff && newI < len(new) { + ret = append(ret, &plans.Change{ + Action: plans.Update, + Before: old[oldI], + After: new[newI], + }) + oldI++ + newI++ // we also consume the next "new" in this case + continue + } + + ret = append(ret, &plans.Change{ + Action: plans.Delete, + Before: old[oldI], + After: cty.NullVal(old[oldI].Type()), + }) + oldI++ + } + for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { + ret = append(ret, &plans.Change{ + Action: plans.Create, + Before: cty.NullVal(new[newI].Type()), + After: new[newI], + }) + newI++ + } + if lcsI < len(lcs) { + ret = append(ret, &plans.Change{ + Action: plans.NoOp, + Before: lcs[lcsI], + After: lcs[lcsI], + }) + + // All of our indexes advance together now, since the line + // is common to all three sequences. + lcsI++ + oldI++ + newI++ + } + } + return ret +} + +func ctyEqualWithUnknown(old, new cty.Value) bool { + if !old.IsWhollyKnown() || !new.IsWhollyKnown() { + return false + } + return old.Equals(new).True() +} + +// ctyTypesEqual checks equality of two types more loosely +// by avoiding checks of object/tuple elements +// as we render differences on element-by-element basis anyway +func ctyTypesEqual(oldT, newT cty.Type) bool { + if oldT.IsObjectType() && newT.IsObjectType() { + return true + } + if oldT.IsTupleType() && newT.IsTupleType() { + return true + } + return oldT.Equals(newT) +} + +func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { + if cap(path)-len(path) >= minExtra { + return path + } + newCap := cap(path) * 2 + if newCap < (len(path) + minExtra) { + newCap = len(path) + minExtra + } + newPath := make(cty.Path, len(path), newCap) + copy(newPath, path) + return newPath +} + +// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "list" is +// actually represented as a tuple type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsListType() { + return cty.ListValEmpty(ty.ElementType()) + } + return cty.EmptyTupleVal // must need a tuple, then +} + +// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. 
+//
+// In particular, this function handles the special situation where a "map" is
+// actually represented as an object type where nested blocks contain
+// dynamically-typed values.
+func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value {
+ if !in.IsNull() {
+ return in
+ }
+ if ty := in.Type(); ty.IsMapType() {
+ return cty.MapValEmpty(ty.ElementType())
+ }
+ return cty.EmptyObjectVal // must need an object, then
+}
+
+// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil
+// or returns an empty value of a suitable type to serve as a placeholder for it.
+func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value {
+ if !in.IsNull() {
+ return in
+ }
+ // Dynamically-typed attributes are not supported inside blocks backed by
+ // sets, so our result here is always a set.
+ return cty.SetValEmpty(in.Type().ElementType())
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go
new file mode 100644
index 000000000..aa8d7deb2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/format.go
@@ -0,0 +1,8 @@
+// Package format contains helpers for formatting various Terraform
+// structures for human-readable output.
+//
+// This package is used by the official Terraform CLI in formatting any
+// output and is exported to encourage non-official frontends to mimic the
+// output formatting as much as possible so that text formats of Terraform
+// structures have a consistent look and feel.
+package format
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go
new file mode 100644
index 000000000..85ebbfec5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/object_id.go
@@ -0,0 +1,123 @@
+package format
+
+import (
+ "github.com/zclconf/go-cty/cty"
+)
+
+// ObjectValueID takes a value that is assumed to be an object representation
+// of some resource instance object and attempts to heuristically find an
+// attribute of it that is likely to be a unique identifier in the remote
+// system that it belongs to which will be useful to the user.
+//
+// If such an attribute is found, its name and string value intended for
+// display are returned. Both returned strings are empty if no such attribute
+// exists, in which case the caller should assume that the resource instance
+// address within the Terraform configuration is the best available identifier.
+//
+// This is only a best-effort sort of thing, relying on naming conventions in
+// our resource type schemas. The result is not guaranteed to be unique, but
+// should generally be suitable for display to an end-user anyway.
+//
+// This function will panic if the given value is not of an object type.
+func ObjectValueID(obj cty.Value) (k, v string) {
+ if obj.IsNull() || !obj.IsKnown() {
+ return "", ""
+ }
+
+ atys := obj.Type().AttributeTypes()
+
+ switch {
+
+ case atys["id"] == cty.String:
+ v := obj.GetAttr("id")
+ if v.IsKnown() && !v.IsNull() {
+ return "id", v.AsString()
+ }
+
+ case atys["name"] == cty.String:
+ // "name" isn't always globally unique, but if there isn't also an
+ // "id" then it _often_ is, in practice.
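Editor's aside: ObjectValueID is a preference-ordered heuristic, trying "id" first and then "name". Stripped of cty, the same idea looks roughly like the following sketch; the helper name and sample attributes are invented:

package main

import "fmt"

// displayIdentifier returns the attribute most likely to identify an object
// to a human, preferring "id" and falling back to "name".
func displayIdentifier(attrs map[string]string) (key, value string) {
	for _, k := range []string{"id", "name"} {
		if v, ok := attrs[k]; ok && v != "" {
			return k, v
		}
	}
	return "", ""
}

func main() {
	k, v := displayIdentifier(map[string]string{"name": "web", "id": "i-0abc123"})
	fmt.Printf("%s = %s\n", k, v) // id = i-0abc123
}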
+ v := obj.GetAttr("name")
+ if v.IsKnown() && !v.IsNull() {
+ return "name", v.AsString()
+ }
+ }
+
+ return "", ""
+}
+
+// ObjectValueName takes a value that is assumed to be an object representation
+// of some resource instance object and attempts to heuristically find an
+// attribute of it that is likely to be a human-friendly name in the remote
+// system that it belongs to which will be useful to the user.
+//
+// If such an attribute is found, its name and string value intended for
+// display are returned. Both returned strings are empty if no such attribute
+// exists, in which case the caller should assume that the resource instance
+// address within the Terraform configuration is the best available identifier.
+//
+// This is only a best-effort sort of thing, relying on naming conventions in
+// our resource type schemas. The result is not guaranteed to be unique, but
+// should generally be suitable for display to an end-user anyway.
+//
+// Callers that use both ObjectValueName and ObjectValueID at the same time
+// should be prepared to get the same attribute key and value from both in
+// some cases, since there is overlap between the id-extraction and
+// name-extraction heuristics.
+//
+// This function will panic if the given value is not of an object type.
+func ObjectValueName(obj cty.Value) (k, v string) {
+ if obj.IsNull() || !obj.IsKnown() {
+ return "", ""
+ }
+
+ atys := obj.Type().AttributeTypes()
+
+ switch {
+
+ case atys["name"] == cty.String:
+ v := obj.GetAttr("name")
+ if v.IsKnown() && !v.IsNull() {
+ return "name", v.AsString()
+ }
+
+ case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String:
+ tags := obj.GetAttr("tags")
+ if tags.IsNull() || !tags.IsWhollyKnown() {
+ break
+ }
+
+ switch {
+ case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True):
+ v := tags.Index(cty.StringVal("name"))
+ if v.IsKnown() && !v.IsNull() {
+ return "tags.name", v.AsString()
+ }
+ case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True):
+ // AWS-style naming convention
+ v := tags.Index(cty.StringVal("Name"))
+ if v.IsKnown() && !v.IsNull() {
+ return "tags.Name", v.AsString()
+ }
+ }
+ }
+
+ return "", ""
+}
+
+// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID
+// and ObjectValueName (in that preference order) to try to extract some sort
+// of human-friendly descriptive string value for an object as additional
+// context about an object when it is being displayed in a compact way (where
+// not all of the attributes are visible).
+//
+// Just as with the two functions it wraps, it is a best-effort and may return
+// two empty strings if no suitable attribute can be found for a given object.
+func ObjectValueIDOrName(obj cty.Value) (k, v string) { + k, v = ObjectValueID(obj) + if k != "" { + return + } + k, v = ObjectValueName(obj) + return +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go new file mode 100644 index 000000000..14869ad3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/command/format/state.go @@ -0,0 +1,208 @@ +package format + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/terraform" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" +) + +// StateOpts are the options for formatting a state. +type StateOpts struct { + // State is the state to format. This is required. + State *states.State + + // Schemas are used to decode attributes. This is required. + Schemas *terraform.Schemas + + // Color is the colorizer. This is optional. + Color *colorstring.Colorize +} + +// State takes a state and returns a string +func State(opts *StateOpts) string { + if opts.Color == nil { + panic("colorize not given") + } + + if opts.Schemas == nil { + panic("schemas not given") + } + + s := opts.State + if len(s.Modules) == 0 { + return "The state file is empty. No resources are represented." + } + + buf := bytes.NewBufferString("[reset]") + p := blockBodyDiffPrinter{ + buf: buf, + color: opts.Color, + action: plans.NoOp, + } + + // Format all the modules + for _, m := range s.Modules { + formatStateModule(p, m, opts.Schemas) + } + + // Write the outputs for the root module + m := s.RootModule() + + if m.OutputValues != nil { + if len(m.OutputValues) > 0 { + p.buf.WriteString("Outputs:\n\n") + } + + // Sort the outputs + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + // Output each output k/v pair + for _, k := range ks { + v := m.OutputValues[k] + p.buf.WriteString(fmt.Sprintf("%s = ", k)) + p.writeValue(v.Value, plans.NoOp, 0) + p.buf.WriteString("\n") + } + } + + trimmedOutput := strings.TrimSpace(p.buf.String()) + trimmedOutput += "[reset]" + + return opts.Color.Color(trimmedOutput) + +} + +func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { + // First get the names of all the resources so we can show them + // in alphabetical order. + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + sort.Strings(names) + + // Go through each resource and begin building up the output. 
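Editor's aside: both State and formatStateModule sort map keys before printing because Go randomizes map iteration order; the sort is what makes the rendered output deterministic. A minimal illustration of that pattern:

package main

import (
	"fmt"
	"sort"
)

func main() {
	outputs := map[string]string{"zone": "us-east-1a", "id": "i-0abc123", "name": "web"}

	// Collect and sort the keys so the output order never depends on map
	// iteration order.
	keys := make([]string, 0, len(outputs))
	for k := range outputs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s = %s\n", k, outputs[k]) // always id, name, zone in that order
	}
}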
+ for _, key := range names { + for k, v := range m.Resources[key].Instances { + // keep these in order to keep the current object first, and + // provide deterministic output for the deposed objects + type obj struct { + header string + instance *states.ResourceInstanceObjectSrc + } + instances := []obj{} + + addr := m.Resources[key].Addr + + taintStr := "" + if v.Current != nil && v.Current.Status == 'T' { + taintStr = " (tainted)" + } + + instances = append(instances, + obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current}) + + for dk, v := range v.Deposed { + instances = append(instances, + obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v}) + } + + // Sort the instances for consistent output. + // Starting the sort from the second index, so the current instance + // is always first. + sort.Slice(instances[1:], func(i, j int) bool { + return instances[i+1].header < instances[j+1].header + }) + + for _, obj := range instances { + header := obj.header + instance := obj.instance + p.buf.WriteString(header) + if instance == nil { + // this shouldn't happen, but there's nothing to do here so + // don't panic below. + continue + } + + var schema *configschema.Block + provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() + if _, exists := schemas.Providers[provider]; !exists { + // This should never happen in normal use because we should've + // loaded all of the schemas and checked things prior to this + // point. We can't return errors here, but since this is UI code + // we will try to do _something_ reasonable. + p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) + continue + } + + switch addr.Mode { + case addrs.ManagedResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "resource %q %q {", + addr.Type, + addr.Name, + )) + case addrs.DataResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) + continue + } + + p.buf.WriteString(fmt.Sprintf( + "data %q %q {", + addr.Type, + addr.Name, + )) + default: + // should never happen, since the above is exhaustive + p.buf.WriteString(addr.String()) + } + + val, err := instance.Decode(schema.ImpliedType()) + if err != nil { + fmt.Println(err.Error()) + break + } + + path := make(cty.Path, 0, 3) + bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) + if bodyWritten { + p.buf.WriteString("\n") + } + + p.buf.WriteString("}\n\n") + } + } + } + p.buf.WriteString("\n") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go new file mode 100644 index 000000000..76d161d72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/backend.go @@ -0,0 +1,24 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +// Backend represents a "backend" block inside a "terraform" block in a module +// or file. 
+type Backend struct { + Type string + Config hcl.Body + + TypeRange hcl.Range + DeclRange hcl.Range +} + +func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) { + return &Backend{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + Config: block.Body, + DeclRange: block.DefRange, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go new file mode 100644 index 000000000..e594ebd40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/compat_shim.go @@ -0,0 +1,116 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// ------------------------------------------------------------------------- +// Functions in this file are compatibility shims intended to ease conversion +// from the old configuration loader. Any use of these functions that makes +// a change should generate a deprecation warning explaining to the user how +// to update their code for new patterns. +// +// Shims are particularly important for any patterns that have been widely +// documented in books, tutorials, etc. Users will still be starting from +// these examples and we want to help them adopt the latest patterns rather +// than leave them stranded. +// ------------------------------------------------------------------------- + +// shimTraversalInString takes any arbitrary expression and checks if it is +// a quoted string in the native syntax. If it _is_, then it is parsed as a +// traversal and re-wrapped into a synthetic traversal expression and a +// warning is generated. Otherwise, the given expression is just returned +// verbatim. +// +// This function has no effect on expressions from the JSON syntax, since +// traversals in strings are the required pattern in that syntax. +// +// If wantKeyword is set, the generated warning diagnostic will talk about +// keywords rather than references. The behavior is otherwise unchanged, and +// the caller remains responsible for checking that the result is indeed +// a keyword, e.g. using hcl.ExprAsKeyword. +func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) { + // ObjectConsKeyExpr is a special wrapper type used for keys on object + // constructors to deal with the fact that naked identifiers are normally + // handled as "bareword" strings rather than as variable references. Since + // we know we're interpreting as a traversal anyway (and thus it won't + // matter whether it's a string or an identifier) we can safely just unwrap + // here and then process whatever we find inside as normal. + if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { + expr = ocke.Wrapped + } + + if !exprIsNativeQuotedString(expr) { + return expr, nil + } + + strVal, diags := expr.Value(nil) + if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { + // Since we're not even able to attempt a shim here, we'll discard + // the diagnostics we saw so far and let the caller's own error + // handling take care of reporting the invalid expression. + return expr, nil + } + + // The position handling here isn't _quite_ right because it won't + // take into account any escape sequences in the literal string, but + // it should be close enough for any error reporting to make sense. 
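Editor's aside: the shim below re-parses the contents of a quoted string as an absolute traversal, advancing the start position one column and one byte past the opening quote so any diagnostics still point into the original file. A hedged, standalone sketch using the public hclsyntax API; the file name and position offsets are invented for illustration:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
	// Pretend "aws_instance.web.id" appeared as a quoted string in main.tf;
	// the string contents start one column and one byte after the opening quote.
	start := hcl.Pos{Line: 3, Column: 11, Byte: 43} // hypothetical position

	traversal, diags := hclsyntax.ParseTraversalAbs([]byte("aws_instance.web.id"), "main.tf", start)
	if diags.HasErrors() {
		fmt.Println(diags.Error())
		return
	}
	fmt.Println(traversal.RootName(), len(traversal)) // aws_instance 3
}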
+ srcRange := expr.Range() + startPos := srcRange.Start // copy + startPos.Column++ // skip initial quote + startPos.Byte++ // skip initial quote + + traversal, tDiags := hclsyntax.ParseTraversalAbs( + []byte(strVal.AsString()), + srcRange.Filename, + startPos, + ) + diags = append(diags, tDiags...) + + // For initial release our deprecation warnings are disabled to allow + // a period where modules can be compatible with both old and new + // conventions. + // FIXME: Re-enable these deprecation warnings in a release prior to + // Terraform 0.13 and then remove the shims altogether for 0.13. + /* + if wantKeyword { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted keywords are deprecated", + Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", + Subject: &srcRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted references are deprecated", + Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", + Subject: &srcRange, + }) + } + */ + + return &hclsyntax.ScopeTraversalExpr{ + Traversal: traversal, + SrcRange: srcRange, + }, diags +} + +// shimIsIgnoreChangesStar returns true if the given expression seems to be +// a string literal whose value is "*". This is used to support a legacy +// form of ignore_changes = all . +// +// This function does not itself emit any diagnostics, so it's the caller's +// responsibility to emit a warning diagnostic when this function returns true. +func shimIsIgnoreChangesStar(expr hcl.Expression) bool { + val, valDiags := expr.Value(nil) + if valDiags.HasErrors() { + return false + } + if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { + return false + } + return val.AsString() == "*" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go new file mode 100644 index 000000000..82c88a10f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config.go @@ -0,0 +1,164 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. 
+ Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// NewEmptyConfig constructs a single-node configuration tree with an empty +// root module. This is generally a pretty useless thing to do, so most callers +// should instead use BuildConfig. +func NewEmptyConfig() *Config { + ret := &Config{} + ret.Root = ret + ret.Children = make(map[string]*Config) + ret.Module = &Module{} + return ret +} + +// DeepEach calls the given function once for each module in the tree, starting +// with the receiver. +// +// A parent is always called before its children and children of a particular +// node are visited in lexicographic order by their names. +func (c *Config) DeepEach(cb func(c *Config)) { + cb(c) + + names := make([]string, 0, len(c.Children)) + for name := range c.Children { + names = append(names, name) + } + + for _, name := range names { + c.Children[name].DeepEach(cb) + } +} + +// DescendentForInstance is like Descendent except that it accepts a path +// to a particular module instance in the dynamic module graph, returning +// the node from the static module graph that corresponds to it. +// +// All instances created by a particular module call share the same +// configuration, so the keys within the given path are disregarded. 
+func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { + current := c + for _, step := range path { + current = current.Children[step.Name] + if current == nil { + return nil + } + } + return current +} + +// ProviderTypes returns the names of each distinct provider type referenced +// in the receiving configuration. +// +// This is a helper for easily determining which provider types are required +// to fully interpret the configuration, though it does not include version +// information and so callers are expected to have already dealt with +// provider version selection in an earlier step and have identified suitable +// versions for each provider. +func (c *Config) ProviderTypes() []string { + m := make(map[string]struct{}) + c.gatherProviderTypes(m) + + ret := make([]string, 0, len(m)) + for k := range m { + ret = append(ret, k) + } + sort.Strings(ret) + return ret +} +func (c *Config) gatherProviderTypes(m map[string]struct{}) { + if c == nil { + return + } + + for _, pc := range c.Module.ProviderConfigs { + m[pc.Name] = struct{}{} + } + for _, rc := range c.Module.ManagedResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + for _, rc := range c.Module.DataResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + + // Must also visit our child modules, recursively. + for _, cc := range c.Children { + cc.gatherProviderTypes(m) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go new file mode 100644 index 000000000..cb46b65aa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/config_build.go @@ -0,0 +1,160 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. +func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := map[string]*Config{} + + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. 
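Editor's aside: BuildConfig delegates the loading of each child module to a caller-supplied ModuleWalker and recurses over call names in sorted order, which keeps the resulting tree and any logging deterministic. A toy, dependency-free sketch of that callback-driven recursion; all names are hypothetical:

package main

import (
	"fmt"
	"sort"
)

type node struct {
	Name     string
	Children map[string]*node
}

// loader is the toy counterpart of ModuleWalker: given a path from the root,
// it returns the names of that module's direct children.
type loader func(path []string) []string

func build(name string, path []string, load loader) *node {
	n := &node{Name: name, Children: map[string]*node{}}

	childNames := load(path)
	sort.Strings(childNames) // walk children in a predictable order

	for _, c := range childNames {
		childPath := append(append([]string(nil), path...), c)
		n.Children[c] = build(c, childPath, load)
	}
	return n
}

func main() {
	root := build("root", nil, func(path []string) []string {
		if len(path) == 0 {
			return []string{"network", "app"}
		}
		return nil
	})
	fmt.Println(len(root.Children)) // 2
}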
+ callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + VersionConstraint: call.Version, + Parent: parent, + CallRange: call.DeclRange, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. + continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallRange: call.DeclRange, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = append(diags, modDiags...) + + ret[call.Name] = child + } + + return ret, diags +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. + LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. 
This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source range for the header of the "module" block + // in configuration that prompted this request. This can be used as the + // subject of an error diagnostic that relates to the module call itself, + // rather than to either its source address or its version number. + CallRange hcl.Range +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go new file mode 100644 index 000000000..ebbeb3b62 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/copy_dir.go @@ -0,0 +1,125 @@ +package configload + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. 
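Editor's aside: the file branch that follows is the usual open, create, io.Copy, and Chmod sequence. Pulled out on its own as a self-contained helper (the name copyFile and the paths in main are invented), the step looks like this:

package main

import (
	"io"
	"log"
	"os"
)

// copyFile copies src to dst and preserves the source's permission bits.
func copyFile(dst, src string) error {
	info, err := os.Stat(src)
	if err != nil {
		return err
	}

	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	return os.Chmod(dst, info.Mode())
}

func main() {
	if err := copyFile("example.copy", "example.txt"); err != nil {
		log.Fatal(err)
	}
}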
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths are the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+ if a == b {
+ return true, nil
+ }
+
+ aIno, err := inode(a)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ bIno, err := inode(b)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if aIno > 0 && aIno == bIno {
+ return true, nil
+ }
+
+ return false, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
new file mode 100644
index 000000000..8b615f902
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/doc.go
@@ -0,0 +1,4 @@
+// Package configload knows how to install modules into the .terraform/modules
+// directory and to load modules from those installed locations. It is used
+// in conjunction with the LoadConfig function in the parent package.
+package configload
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go
new file mode 100644
index 000000000..57df04145
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode.go
@@ -0,0 +1,21 @@
+// +build linux darwin openbsd netbsd solaris dragonfly
+
+package configload
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// lookup the inode of a file on posix systems
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return st.Ino, nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go
new file mode 100644
index 000000000..4dc28eaa8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_freebsd.go
@@ -0,0 +1,21 @@
+// +build freebsd
+
+package configload
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+)
+
+// lookup the inode of a file on posix systems
+func inode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+ return uint64(st.Ino), nil
+ }
+ return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go
new file mode 100644
index 000000000..0d22e6726
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/inode_windows.go
@@ -0,0 +1,8 @@
+// +build windows
+
+package configload
+
+// no syscall.Stat_t on windows, return 0 for inodes
+func inode(path string) (uint64, error) {
+ return
0, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go new file mode 100644 index 000000000..0d12d7d2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader.go @@ -0,0 +1,101 @@ +package configload + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/spf13/afero" +) + +// A Loader instance is the main entry-point for loading configurations via +// this package. +// +// It extends the general config-loading functionality in the parent package +// "configs" to support installation of modules from remote sources and +// loading full configurations using modules that were previously installed. +type Loader struct { + // parser is used to read configuration + parser *configs.Parser + + // modules is used to install and locate descendent modules that are + // referenced (directly or indirectly) from the root module. + modules moduleMgr +} + +// Config is used with NewLoader to specify configuration arguments for the +// loader. +type Config struct { + // ModulesDir is a path to a directory where descendent modules are + // (or should be) installed. (This is usually the + // .terraform/modules directory, in the common case where this package + // is being loaded from the main Terraform CLI package.) + ModulesDir string + + // Services is the service discovery client to use when locating remote + // module registry endpoints. If this is nil then registry sources are + // not supported, which should be true only in specialized circumstances + // such as in tests. + Services *disco.Disco +} + +// NewLoader creates and returns a loader that reads configuration from the +// real OS filesystem. +// +// The loader has some internal state about the modules that are currently +// installed, which is read from disk as part of this function. If that +// manifest cannot be read then an error will be returned. +func NewLoader(config *Config) (*Loader, error) { + fs := afero.NewOsFs() + parser := configs.NewParser(fs) + reg := registry.NewClient(config.Services, nil) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: true, + Dir: config.ModulesDir, + Services: config.Services, + Registry: reg, + }, + } + + err := ret.modules.readModuleManifestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %s", err) + } + + return ret, nil +} + +// ModulesDir returns the path to the directory where the loader will look for +// the local cache of remote module packages. +func (l *Loader) ModulesDir() string { + return l.modules.Dir +} + +// RefreshModules updates the in-memory cache of the module manifest from the +// module manifest file on disk. This is not necessary in normal use because +// module installation and configuration loading are separate steps, but it +// can be useful in tests where module installation is done as a part of +// configuration loading by a helper function. +// +// Call this function after any module installation where an existing loader +// is already alive and may be used again later. +// +// An error is returned if the manifest file cannot be read. +func (l *Loader) RefreshModules() error { + if l == nil { + // Nothing to do, then. 
+ return nil + } + return l.modules.readModuleManifestSnapshot() +} + +// Sources returns the source code cache for the underlying parser of this +// loader. This is a shorthand for l.Parser().Sources(). +func (l *Loader) Sources() map[string][]byte { + return l.parser.Sources() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go new file mode 100644 index 000000000..bcfa733e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_load.go @@ -0,0 +1,105 @@ +package configload + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// LoadConfig reads the Terraform module in the given directory and uses it as the +// root module to build the static module tree that represents a configuration, +// assuming that all required descendent modules have already been installed. +// +// If error diagnostics are returned, the returned configuration may be either +// nil or incomplete. In the latter case, cautious static analysis is possible +// in spite of the errors. +// +// LoadConfig performs the basic syntax and uniqueness validations that are +// required to process the individual modules, and also detects +func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir) + if rootMod == nil { + return nil, diags + } + + cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad)) + diags = append(diags, cDiags...) + + return cfg, diags +} + +// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that +// are presumed to have already been installed. A different function +// (moduleWalkerInstall) is used for installation. +func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + // Since we're just loading here, we expect that all referenced modules + // will be already installed and described in our manifest. However, we + // do verify that the manifest and the configuration are in agreement + // so that we can prompt the user to run "terraform init" if not. + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.CallRange, + }, + } + } + + var diags hcl.Diagnostics + + // Check for inconsistencies between manifest and config + if req.SourceAddr != record.SourceAddr { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module source has changed", + Detail: "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if len(req.VersionConstraint.Required) > 0 && record.Version == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. 
Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: fmt.Sprintf( + "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + record.Version, + ), + Subject: &req.SourceAddrRange, + }) + } + + mod, mDiags := l.parser.LoadConfigDir(record.Dir) + diags = append(diags, mDiags...) + if mod == nil { + // nil specifically indicates that the directory does not exist or + // cannot be read, so in this case we'll discard any generic diagnostics + // returned from LoadConfigDir and produce our own context-sensitive + // error message. + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), + Subject: &req.CallRange, + }, + } + } + + return mod, record.Version, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go new file mode 100644 index 000000000..0772edc71 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/loader_snapshot.go @@ -0,0 +1,492 @@ +package configload + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/spf13/afero" +) + +// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously +// creates an in-memory snapshot of the configuration files used, which can +// be later used to create a loader that may read only from this snapshot. +func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir) + if rootMod == nil { + return nil, nil, diags + } + + snap := &Snapshot{ + Modules: map[string]*SnapshotModule{}, + } + walker := l.makeModuleWalkerSnapshot(snap) + cfg, cDiags := configs.BuildConfig(rootMod, walker) + diags = append(diags, cDiags...) + + addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) + diags = append(diags, addDiags...) + + return cfg, snap, diags +} + +// NewLoaderFromSnapshot creates a Loader that reads files only from the +// given snapshot. +// +// A snapshot-based loader cannot install modules, so calling InstallModules +// on the return value will cause a panic. +// +// A snapshot-based loader also has access only to configuration files. Its +// underlying parser does not have access to other files in the native +// filesystem, such as values files. For those, either use a normal loader +// (created by NewLoader) or use the configs.Parser API directly. 
+func NewLoaderFromSnapshot(snap *Snapshot) *Loader { + fs := snapshotFS{snap} + parser := configs.NewParser(fs) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: false, + manifest: snap.moduleManifest(), + }, + } + + return ret +} + +// Snapshot is an in-memory representation of the source files from a +// configuration, which can be used as an alternative configurations source +// for a loader with NewLoaderFromSnapshot. +// +// The primary purpose of a Snapshot is to build the configuration portion +// of a plan file (see ../../plans/planfile) so that it can later be reloaded +// and used to recover the exact configuration that the plan was built from. +type Snapshot struct { + // Modules is a map from opaque module keys (suitable for use as directory + // names on all supported operating systems) to the snapshot information + // about each module. + Modules map[string]*SnapshotModule +} + +// SnapshotModule represents a single module within a Snapshot. +type SnapshotModule struct { + // Dir is the path, relative to the root directory given when the + // snapshot was created, where the module appears in the snapshot's + // virtual filesystem. + Dir string + + // Files is a map from each configuration file filename for the + // module to a raw byte representation of the source file contents. + Files map[string][]byte + + // SourceAddr is the source address given for this module in configuration. + SourceAddr string `json:"Source"` + + // Version is the version of the module that is installed, or nil if + // the module is installed from a source that does not support versions. + Version *version.Version `json:"-"` +} + +// moduleManifest constructs a module manifest based on the contents of +// the receiving snapshot. +func (s *Snapshot) moduleManifest() modsdir.Manifest { + ret := make(modsdir.Manifest) + + for k, modSnap := range s.Modules { + ret[k] = modsdir.Record{ + Key: k, + Dir: modSnap.Dir, + SourceAddr: modSnap.SourceAddr, + Version: modSnap.Version, + } + } + + return ret +} + +// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit +// the same lookup behaviors as l.moduleWalkerLoad but will additionally write +// source files from the referenced modules into the given snapshot. +func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { + return configs.ModuleWalkerFunc( + func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + mod, v, diags := l.moduleWalkerLoad(req) + if diags.HasErrors() { + return mod, v, diags + } + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + // Should never happen, since otherwise moduleWalkerLoader would've + // returned an error and we would've returned already. + panic(fmt.Sprintf("module %s is not present in manifest", key)) + } + + addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) + diags = append(diags, addDiags...) + + return mod, v, diags + }, + ) +} + +func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { + var diags hcl.Diagnostics + + primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) + if moreDiags.HasErrors() { + // Any diagnostics we get here should be already present + // in diags, so it's weird if we get here but we'll allow it + // and return a general error message in that case. 
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read directory for module", + Detail: fmt.Sprintf("The source directory %s could not be read", dir), + }) + return diags + } + + snapMod := &SnapshotModule{ + Dir: dir, + Files: map[string][]byte{}, + SourceAddr: sourceAddr, + Version: v, + } + + files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) + files = append(files, primaryFiles...) + files = append(files, overrideFiles...) + sources := l.Sources() // should be populated with all the files we need by now + for _, filePath := range files { + filename := filepath.Base(filePath) + src, exists := sources[filePath] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing source file for snapshot", + Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), + }) + continue + } + snapMod.Files[filepath.Clean(filename)] = src + } + + snap.Modules[key] = snapMod + + return diags +} + +// snapshotFS is an implementation of afero.Fs that reads from a snapshot. +// +// This is not intended as a general-purpose filesystem implementation. Instead, +// it just supports the minimal functionality required to support the +// configuration loader and parser as an implementation detail of creating +// a loader from a snapshot. +type snapshotFS struct { + snap *Snapshot +} + +var _ afero.Fs = snapshotFS{} + +func (fs snapshotFS) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("cannot create file inside configuration snapshot") +} + +func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directory inside configuration snapshot") +} + +func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directories inside configuration snapshot") +} + +func (fs snapshotFS) Open(name string) (afero.File, error) { + + // Our "filesystem" is sparsely populated only with the directories + // mentioned by modules in our snapshot, so the high-level process + // for opening a file is: + // - Find the module snapshot corresponding to the containing directory + // - Find the file within that snapshot + // - Wrap the resulting byte slice in a snapshotFile to return + // + // The other possibility handled here is if the given name is for the + // module directory itself, in which case we'll return a snapshotDir + // instead. + // + // This function doesn't try to be incredibly robust in supporting + // different permutations of paths, etc because in practice we only + // need to support the path forms that our own loader and parser will + // generate. + + dir := filepath.Dir(name) + fn := filepath.Base(name) + directDir := filepath.Clean(name) + + // First we'll check to see if this is an exact path for a module directory. + // We need to do this first (rather than as part of the next loop below) + // because a module in a child directory of another module can otherwise + // appear to be a file in that parent directory. 
+ for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == directDir { + // We've matched the module directory itself + filenames := make([]string, 0, len(candidate.Files)) + for n := range candidate.Files { + filenames = append(filenames, n) + } + sort.Strings(filenames) + return snapshotDir{ + filenames: filenames, + }, nil + } + } + + // If we get here then the given path isn't a module directory exactly, so + // we'll treat it as a file path and try to find a module directory it + // could be located in. + var modSnap *SnapshotModule + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == dir { + modSnap = candidate + break + } + } + if modSnap == nil { + return nil, os.ErrNotExist + } + + src, exists := modSnap.Files[fn] + if !exists { + return nil, os.ErrNotExist + } + + return &snapshotFile{ + src: src, + }, nil +} + +func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return fs.Open(name) +} + +func (fs snapshotFS) Remove(name string) error { + return fmt.Errorf("cannot remove file inside configuration snapshot") +} + +func (fs snapshotFS) RemoveAll(path string) error { + return fmt.Errorf("cannot remove files inside configuration snapshot") +} + +func (fs snapshotFS) Rename(old, new string) error { + return fmt.Errorf("cannot rename file inside configuration snapshot") +} + +func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + _, isDir := f.(snapshotDir) + return snapshotFileInfo{ + name: filepath.Base(name), + isDir: isDir, + }, nil +} + +func (fs snapshotFS) Name() string { + return "ConfigSnapshotFS" +} + +func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("cannot set file mode inside configuration snapshot") +} + +func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("cannot set file times inside configuration snapshot") +} + +type snapshotFile struct { + snapshotFileStub + src []byte + at int64 +} + +var _ afero.File = (*snapshotFile)(nil) + +func (f *snapshotFile) Read(p []byte) (n int, err error) { + if len(p) > 0 && f.at == int64(len(f.src)) { + return 0, io.EOF + } + if f.at > int64(len(f.src)) { + return 0, io.ErrUnexpectedEOF + } + if int64(len(f.src))-f.at >= int64(len(p)) { + n = len(p) + } else { + n = int(int64(len(f.src)) - f.at) + } + copy(p, f.src[f.at:f.at+int64(n)]) + f.at += int64(n) + return +} + +func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { + f.at = off + return f.Read(p) +} + +func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + f.at = offset + case 1: + f.at += offset + case 2: + f.at = int64(len(f.src)) + offset + } + return f.at, nil +} + +type snapshotDir struct { + snapshotFileStub + filenames []string + at int +} + +var _ afero.File = snapshotDir{} + +func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { + names, err := f.Readdirnames(count) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, len(names)) + for i, name := range names { + ret[i] = snapshotFileInfo{ + name: name, + isDir: false, + } + } + return ret, nil +} + +func (f snapshotDir) Readdirnames(count int) ([]string, error) { + var outLen int + names := f.filenames[f.at:] + if count > 0 { + if len(names) < count { + outLen = len(names) + } else { + outLen = count + } + if len(names) == 0 { + return nil, 
io.EOF + } + } else { + outLen = len(names) + } + f.at += outLen + + return names[:outLen], nil +} + +// snapshotFileInfo is a minimal implementation of os.FileInfo to support our +// virtual filesystem from snapshots. +type snapshotFileInfo struct { + name string + isDir bool +} + +var _ os.FileInfo = snapshotFileInfo{} + +func (fi snapshotFileInfo) Name() string { + return fi.name +} + +func (fi snapshotFileInfo) Size() int64 { + // In practice, our parser and loader never call Size + return -1 +} + +func (fi snapshotFileInfo) Mode() os.FileMode { + return os.ModePerm +} + +func (fi snapshotFileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi snapshotFileInfo) IsDir() bool { + return fi.isDir +} + +func (fi snapshotFileInfo) Sys() interface{} { + return nil +} + +type snapshotFileStub struct{} + +func (f snapshotFileStub) Close() error { + return nil +} + +func (f snapshotFileStub) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("cannot seek") +} + +func (f snapshotFileStub) Write(p []byte) (n int, err error) { + return f.WriteAt(p, 0) +} + +func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) WriteString(s string) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) Name() string { + // in practice, the loader and parser never use this + return "" +} + +func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Stat() (os.FileInfo, error) { + return nil, fmt.Errorf("cannot stat") +} + +func (f snapshotFileStub) Sync() error { + return nil +} + +func (f snapshotFileStub) Truncate(size int64) error { + return fmt.Errorf("cannot write to file in snapshot") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go new file mode 100644 index 000000000..797f50d24 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/module_mgr.go @@ -0,0 +1,62 @@ +package configload + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/spf13/afero" +) + +type moduleMgr struct { + FS afero.Afero + + // CanInstall is true for a module manager that can support installation. + // + // This must be set only if FS is an afero.OsFs, because the installer + // (which uses go-getter) is not aware of the virtual filesystem + // abstraction and will always write into the "real" filesystem. + CanInstall bool + + // Dir is the path where descendent modules are (or will be) installed. + Dir string + + // Services is a service discovery client that will be used to find + // remote module registry endpoints. This object may be pre-loaded with + // cached discovery information. 
+ Services *disco.Disco + + // Registry is a client for the module registry protocol, which is used + // when a module is requested from a registry source. + Registry *registry.Client + + // manifest tracks the currently-installed modules for this manager. + // + // The loader may read this. Only the installer may write to it, and + // after a set of updates are completed the installer must call + // writeModuleManifestSnapshot to persist a snapshot of the manifest + // to disk for use on subsequent runs. + manifest modsdir.Manifest +} + +func (m *moduleMgr) manifestSnapshotPath() string { + return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) +} + +// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. +func (m *moduleMgr) readModuleManifestSnapshot() error { + r, err := m.FS.Open(m.manifestSnapshotPath()) + if err != nil { + if os.IsNotExist(err) { + // We'll treat a missing file as an empty manifest + m.manifest = make(modsdir.Manifest) + return nil + } + return err + } + + m.manifest, err = modsdir.ReadManifestSnapshot(r) + return err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go new file mode 100644 index 000000000..86ca9d10b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configload/testing.go @@ -0,0 +1,43 @@ +package configload + +import ( + "io/ioutil" + "os" + "testing" +) + +// NewLoaderForTests is a variant of NewLoader that is intended to be more +// convenient for unit tests. +// +// The loader's modules directory is a separate temporary directory created +// for each call. Along with the created loader, this function returns a +// cleanup function that should be called before the test completes in order +// to remove that temporary directory. +// +// In the case of any errors, t.Fatal (or similar) will be called to halt +// execution of the test, so the calling test does not need to handle errors +// itself. +func NewLoaderForTests(t *testing.T) (*Loader, func()) { + t.Helper() + + modulesDir, err := ioutil.TempDir("", "tf-configs") + if err != nil { + t.Fatalf("failed to create temporary modules dir: %s", err) + return nil, func() {} + } + + cleanup := func() { + os.RemoveAll(modulesDir) + } + + loader, err := NewLoader(&Config{ + ModulesDir: modulesDir, + }) + if err != nil { + cleanup() + t.Fatalf("failed to create config loader: %s", err) + return nil, func() {} + } + + return loader, cleanup +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go new file mode 100644 index 000000000..41a533745 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/coerce_value.go @@ -0,0 +1,250 @@ +package configschema + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// CoerceValue attempts to force the given value to conform to the type +// implied by the receiever. +// +// This is useful in situations where a configuration must be derived from +// an already-decoded value. 
It is always better to decode directly from +// configuration where possible since then source location information is +// still available to produce diagnostics, but in special situations this +// function allows a compatible result to be obtained even if the +// configuration objects are not available. +// +// If the given value cannot be converted to conform to the receiving schema +// then an error is returned describing one of possibly many problems. This +// error may be a cty.PathError indicating a position within the nested +// data structure where the problem applies. +func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + default: + attrs[typeName] = blockS.EmptyValue() + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + default: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = 
cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + default: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go new file mode 100644 index 000000000..2c21ca5e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/decoder_spec.go @@ -0,0 +1,123 @@ +package configschema + +import ( + "github.com/hashicorp/hcl/v2/hcldec" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. 
+// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null values if any of the +// block attributes are defined as optional and/or computed respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + ret[name] = attrS.decoderSpec(name) + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. + continue + } + + childSpec := blockS.Block.DecoderSpec() + + // We can only validate 0 or 1 for MinItems, because a dynamic block + // may satisfy any number of min items while only having a single + // block in the config. We cannot validate MaxItems because a + // configuration may have any number of dynamic blocks + minItems := 0 + if blockS.MinItems > 1 { + minItems = 1 + } + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1, + } + if blockS.Nesting == NestingGroup { + ret[name] = &hcldec.DefaultSpec{ + Primary: ret[name], + Default: &hcldec.LiteralSpec{ + Value: blockS.EmptyValue(), + }, + } + } + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, at the expense of our type then not being predictable. + if blockS.Block.ImpliedType().HasDynamicTypes() { + ret[name] = &hcldec.BlockTupleSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + } else { + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + } + case NestingSet: + // We forbid dynamically-typed attributes inside NestingSet in + // InternalValidate, so we don't do anything special to handle + // that here. (There is no set analog to tuple and object types, + // because cty's set implementation depends on knowing the static + // type in order to properly compute its internal hashes.) + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: minItems, + } + case NestingMap: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, at the expense of our type then not being predictable. + if blockS.Block.ImpliedType().HasDynamicTypes() { + ret[name] = &hcldec.BlockObjectSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } else { + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. 
+ continue + } + } + + return ret +} + +func (a *Attribute) decoderSpec(name string) hcldec.Spec { + return &hcldec.AttrSpec{ + Name: name, + Type: a.Type, + Required: a.Required, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go new file mode 100644 index 000000000..caf8d730c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/doc.go @@ -0,0 +1,14 @@ +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go new file mode 100644 index 000000000..005da56bf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/empty_value.go @@ -0,0 +1,59 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// EmptyValue returns the "empty value" for the recieving block, which for +// a block type is a non-null object where all of the attribute values are +// the empty values of the block's attributes and nested block types. +// +// In other words, it returns the value that would be returned if an empty +// block were decoded against the recieving schema, assuming that no required +// attribute or block constraints were honored. +func (b *Block) EmptyValue() cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range b.Attributes { + vals[name] = attrS.EmptyValue() + } + for name, blockS := range b.BlockTypes { + vals[name] = blockS.EmptyValue() + } + return cty.ObjectVal(vals) +} + +// EmptyValue returns the "empty value" for the receiving attribute, which is +// the value that would be returned if there were no definition of the attribute +// at all, ignoring any required constraint. +func (a *Attribute) EmptyValue() cty.Value { + return cty.NullVal(a.Type) +} + +// EmptyValue returns the "empty value" for when there are zero nested blocks +// present of the receiving type. 
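DecoderSpec, described earlier in this file, is the bridge between a configschema.Block and hcldec: the spec it returns decodes an HCL body into a cty value conforming to the block's implied type. A minimal sketch of that pairing follows; the schema, file name, and HCL source are invented for illustration, and because the package is internal to the SDK the example is a sketch rather than code intended to be imported from outside.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hcldec"
	"github.com/hashicorp/hcl/v2/hclsyntax"
	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An invented schema: one required string attribute and one optional map.
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name": {Type: cty.String, Required: true},
			"tags": {Type: cty.Map(cty.String), Optional: true},
		},
	}

	src := []byte("name = \"example\"\n")
	f, diags := hclsyntax.ParseConfig(src, "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	// The spec produced by DecoderSpec decodes into the block's implied type.
	val, decDiags := hcldec.Decode(f.Body, schema.DecoderSpec(), nil)
	if decDiags.HasErrors() {
		log.Fatal(decDiags.Error())
	}

	fmt.Println(val.GetAttr("name").AsString())          // example
	fmt.Println(val.Type().Equals(schema.ImpliedType())) // true
	fmt.Println(val.GetAttr("tags").IsNull())            // true (optional attribute omitted)
}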
+func (b *NestedBlock) EmptyValue() cty.Value { + switch b.Nesting { + case NestingSingle: + return cty.NullVal(b.Block.ImpliedType()) + case NestingGroup: + return b.Block.EmptyValue() + case NestingList: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyTupleVal + } else { + return cty.ListValEmpty(ty) + } + case NestingMap: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyObjectVal + } else { + return cty.MapValEmpty(ty) + } + case NestingSet: + return cty.SetValEmpty(b.Block.ImpliedType()) + default: + // Should never get here because the above is intended to be exhaustive, + // but we'll be robust and return a result nonetheless. + return cty.NullVal(cty.DynamicPseudoType) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go new file mode 100644 index 000000000..51f51cebc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/implied_type.go @@ -0,0 +1,21 @@ +package configschema + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. +func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + return hcldec.ImpliedType(b.DecoderSpec()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go new file mode 100644 index 000000000..ebf1abbab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/internal_validate.go @@ -0,0 +1,105 @@ +package configschema + +import ( + "fmt" + "regexp" + + "github.com/zclconf/go-cty/cty" + + multierror "github.com/hashicorp/go-multierror" +) + +var validName = regexp.MustCompile(`^[a-z0-9_]+$`) + +// InternalValidate returns an error if the receiving block and its child +// schema definitions have any consistencies with the documented rules for +// valid schema. +// +// This is intended to be used within unit tests to detect when a given +// schema is invalid. 
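As the comment above notes, InternalValidate is intended to be exercised from unit tests. The sketch below shows one plausible shape of such a test, also touching ImpliedType and EmptyValue; the schema is invented, and the external test-package name is an assumption since the real package is internal to the SDK.

package configschema_test

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func TestExampleSchema(t *testing.T) {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name": {Type: cty.String, Required: true},
		},
		BlockTypes: map[string]*configschema.NestedBlock{
			"rule": {
				Nesting: configschema.NestingList,
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"port": {Type: cty.Number, Optional: true},
					},
				},
			},
		},
	}

	// InternalValidate reports schema-construction mistakes, e.g. an
	// attribute with none of Optional, Required or Computed set.
	if err := schema.InternalValidate(); err != nil {
		t.Fatal(err)
	}

	// ImpliedType is the object type a decoded block will have, and
	// EmptyValue is the value an entirely absent block decodes to.
	wantTy := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"rule": cty.List(cty.Object(map[string]cty.Type{"port": cty.Number})),
	})
	if !schema.ImpliedType().Equals(wantTy) {
		t.Fatalf("unexpected implied type: %#v", schema.ImpliedType())
	}
	if !schema.EmptyValue().GetAttr("name").IsNull() {
		t.Fatal("expected null name in empty value")
	}
}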
+func (b *Block) InternalValidate() error { + if b == nil { + return fmt.Errorf("top-level block schema is nil") + } + return b.internalValidate("", nil) + +} + +func (b *Block) internalValidate(prefix string, err error) error { + for name, attrS := range b.Attributes { + if attrS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { + err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) + } + if attrS.Optional && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) + } + if attrS.Computed && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) + } + if attrS.Type == cty.NilType { + err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) + } + } + + for name, blockS := range b.BlockTypes { + if blockS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) + continue + } + + if _, isAttr := b.Attributes[name]; isAttr { + err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) + } else if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + + if blockS.MinItems < 0 || blockS.MaxItems < 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) + } + + switch blockS.Nesting { + case NestingSingle: + switch { + case blockS.MinItems != blockS.MaxItems: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) + case blockS.MinItems < 0 || blockS.MinItems > 1: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + } + case NestingGroup: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) + } + case NestingList, NestingSet: + if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + } + if blockS.Nesting == NestingSet { + ety := blockS.Block.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + // properly hash them, and so can't support mixed types. + err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } + case NestingMap: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + } + + subPrefix := prefix + name + "." 
+ err = blockS.Block.internalValidate(subPrefix, err) + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go new file mode 100644 index 000000000..febe743e1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[nestingModeInvalid-0] + _ = x[NestingSingle-1] + _ = x[NestingGroup-2] + _ = x[NestingList-3] + _ = x[NestingSet-4] + _ = x[NestingMap-5] +} + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go new file mode 100644 index 000000000..0be3b8fa3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/none_required.go @@ -0,0 +1,38 @@ +package configschema + +// NoneRequired returns a deep copy of the receiver with any required +// attributes translated to optional. +func (b *Block) NoneRequired() *Block { + ret := &Block{} + + if b.Attributes != nil { + ret.Attributes = make(map[string]*Attribute, len(b.Attributes)) + } + for name, attrS := range b.Attributes { + ret.Attributes[name] = attrS.forceOptional() + } + + if b.BlockTypes != nil { + ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes)) + } + for name, blockS := range b.BlockTypes { + ret.BlockTypes[name] = blockS.noneRequired() + } + + return ret +} + +func (b *NestedBlock) noneRequired() *NestedBlock { + ret := *b + ret.Block = *(ret.Block.NoneRequired()) + ret.MinItems = 0 + ret.MaxItems = 0 + return &ret +} + +func (a *Attribute) forceOptional() *Attribute { + ret := *a + ret.Optional = true + ret.Required = false + return &ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go new file mode 100644 index 000000000..f4702d369 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/schema.go @@ -0,0 +1,130 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be a more a matter of convention in other syntaxes, such as +// JSON. 
+// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Description is an English-language description of the purpose and + // usage of the attribute. A description should be concise and use only + // one or two sentences, leaving full definition to longer-form + // documentation defined elsewhere. + Description string + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool +} + +// NestedBlock represents the embedding of one block within another. +type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additonally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. 
(Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) + // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go new file mode 100644 index 000000000..446705baf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema/validate_traversal.go @@ -0,0 +1,173 @@ +package configschema + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// StaticValidateTraversal checks whether the given traversal (which must be +// relative) refers to a construct in the receiving schema, returning error +// diagnostics if any problems are found. +// +// This method is "optimistic" in that it will not return errors for possible +// problems that cannot be detected statically. It is possible that an +// traversal which passed static validation will still fail when evaluated. +func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics { + if !traversal.IsRelative() { + panic("StaticValidateTraversal on absolute traversal") + } + if len(traversal) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + next := traversal[0] + after := traversal[1:] + + var name string + switch step := next.(type) { + case hcl.TraverseAttr: + name = step.Name + case hcl.TraverseIndex: + // No other traversal step types are allowed directly at a block. + // If it looks like the user was trying to use index syntax to + // access an attribute then we'll produce a specialized message. + key := step.Key + if key.Type() == cty.String && key.IsKnown() && !key.IsNull() { + maybeName := key.AsString() + if hclsyntax.ValidIdentifier(maybeName) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid index operation`, + Detail: fmt.Sprintf(`Only attribute access is allowed here. 
Did you mean to access attribute %q using the dot operator?`, maybeName), + Subject: &step.SrcRange, + }) + return diags + } + } + // If it looks like some other kind of index then we'll use a generic error. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid index operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: &step.SrcRange, + }) + return diags + default: + // No other traversal types should appear in a normal valid traversal, + // but we'll handle this with a generic error anyway to be robust. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: `Only attribute access is allowed here, using the dot operator.`, + Subject: next.SourceRange().Ptr(), + }) + return diags + } + + if attrS, exists := b.Attributes[name]; exists { + // For attribute validation we will just apply the rest of the + // traversal to an unknown value of the attribute type and pass + // through HCL's own errors, since we don't want to replicate all of + // HCL's type checking rules here. + val := cty.UnknownVal(attrS.Type) + _, hclDiags := after.TraverseRel(val) + diags = diags.Append(hclDiags) + return diags + } + + if blockS, exists := b.BlockTypes[name]; exists { + moreDiags := blockS.staticValidateTraversal(name, after) + diags = diags.Append(moreDiags) + return diags + } + + // If we get here then the name isn't valid at all. We'll collect up + // all of the names that _are_ valid to use as suggestions. + var suggestions []string + for name := range b.Attributes { + suggestions = append(suggestions, name) + } + for name := range b.BlockTypes { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unsupported attribute`, + Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), + Subject: next.SourceRange().Ptr(), + }) + + return diags +} + +func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { + if b.Nesting == NestingSingle || b.Nesting == NestingGroup { + // Single blocks are easy: just pass right through. + return b.Block.StaticValidateTraversal(traversal) + } + + if len(traversal) == 0 { + // It's always valid to access a nested block's attribute directly. + return nil + } + + var diags tfdiags.Diagnostics + next := traversal[0] + after := traversal[1:] + + switch b.Nesting { + + case NestingSet: + // Can't traverse into a set at all, since it does not have any keys + // to index with. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Cannot index a set value`, + Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. 
To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), + Subject: next.SourceRange().Ptr(), + }) + return diags + + case NestingList: + if _, ok := next.(hcl.TraverseIndex); ok { + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), + Subject: next.SourceRange().Ptr(), + }) + } + return diags + + case NestingMap: + // Both attribute and index steps are valid for maps, so we'll just + // pass through here and let normal evaluation catch an + // incorrectly-typed index key later, if present. + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + return diags + + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. (Note that we handled NestingSingle separately + // back at the start of this function.) + return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go new file mode 100644 index 000000000..036c2d6c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/depends_on.go @@ -0,0 +1,23 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { + var ret []hcl.Traversal + exprs, diags := hcl.ExprList(attr.Expr) + + for _, expr := range exprs { + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + ret = append(ret, traversal) + } + } + + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go new file mode 100644 index 000000000..f01eb79f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/doc.go @@ -0,0 +1,19 @@ +// Package configs contains types that represent Terraform configurations and +// the different elements thereof. +// +// The functionality in this package can be used for some static analyses of +// Terraform configurations, but this package generally exposes representations +// of the configuration source code rather than the result of evaluating these +// objects. The sibling package "lang" deals with evaluation of structures +// and expressions in the configuration. +// +// Due to its close relationship with HCL, this package makes frequent use +// of types from the HCL API, including raw HCL diagnostic messages. Such +// diagnostics can be converted into Terraform-flavored diagnostics, if needed, +// using functions in the sibling package tfdiags. +// +// The Parser type is the main entry-point into this package. The LoadConfigDir +// method can be used to load a single module directory, and then a full +// configuration (including any descendent modules) can be produced using +// the top-level BuildConfig method. 
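The package comment above points at Parser.LoadConfigDir and BuildConfig as the main entry points. A minimal sketch of that flow follows, under assumptions: the "./config" path is invented, the walker deliberately rejects child module calls rather than installing them (installation is the loader's job), and the package is internal to the SDK, so this is illustrative rather than something external code can import.

package main

import (
	"log"

	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/terraform-plugin-sdk/internal/configs"
)

func main() {
	parser := configs.NewParser(nil) // nil selects the real OS filesystem

	// Load a single module directory as the root module.
	rootMod, diags := parser.LoadConfigDir("./config") // assumed path
	if diags.HasErrors() {
		log.Fatal(diags.Error())
	}

	// BuildConfig walks the module calls in the root module; this walker
	// simply reports that child modules are not supported in this sketch.
	walker := configs.ModuleWalkerFunc(func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
		return nil, nil, hcl.Diagnostics{{
			Severity: hcl.DiagError,
			Summary:  "Child modules not supported",
			Detail:   "This example only loads a single module directory.",
			Subject:  &req.CallRange,
		}}
	})

	cfg, cDiags := configs.BuildConfig(rootMod, walker)
	if cDiags.HasErrors() {
		log.Fatal(cDiags.Error())
	}
	_ = cfg
}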
+package configs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go new file mode 100644 index 000000000..bb4228d98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/flatmap.go @@ -0,0 +1,424 @@ +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. +func FlatmapValueFromHCL2(v cty.Value) map[string]string { + if v.IsNull() { + return nil + } + + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) + } + + m := make(map[string]string) + flatmapValueFromHCL2Map(m, "", v) + return m +} + +func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { + ty := val.Type() + switch { + case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: + flatmapValueFromHCL2Primitive(m, key, val) + case ty.IsObjectType() || ty.IsMapType(): + flatmapValueFromHCL2Map(m, key+".", val) + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + flatmapValueFromHCL2Seq(m, key+".", val) + default: + panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) + } +} + +func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { + if !val.IsKnown() { + m[key] = UnknownVariableValue + return + } + if val.IsNull() { + // Omit entirely + return + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + // Should not be possible, since all primitive types can convert to string. + panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) + } + m[key] = val.AsString() +} + +func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + switch { + case val.Type().IsObjectType(): + // Whole objects can't be unknown in flatmap, so instead we'll + // just write all of the attribute values out as unknown. + for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + len := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + len++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(len) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. 
However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. +func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. 
+ return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care about the "count" of a map for our + // purposes here, but we do need to check if it _exists_ in order to + // recognize the difference between null (not set at all) and empty. + if strCount, exists := m[prefix+"%"]; !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. 
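+		// For example (illustrative, not from the upstream source): with
+		// prefix "tags." the raw key "tags.kubernetes.io/name" yields the
+		// map key "kubernetes.io/name", periods and all, rather than being
+		// decoded as a nested structure.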
+ key := fullKey[len(prefix):] + if key == "%" { + // Ignore the "count" key + continue + } + + val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[key] = val + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + return cty.MapVal(vals), nil +} + +func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + + ety := ty.ElementType() + if count == 0 { + return cty.ListValEmpty(ety), nil + } + + vals = make([]cty.Value, count) + for i := 0; i < count; i++ { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + + return cty.ListVal(vals), nil +} + +func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + strCount, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // Keep track of keys we've seen, se we don't add the same set value + // multiple times. The cty.Set will normally de-duplicate values, but we may + // have unknown values that would not show as equivalent. + seen := map[string]bool{} + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + subKey := fullKey[len(prefix):] + if subKey == "#" { + // Ignore the "count" key + continue + } + key := fullKey + if dot := strings.IndexByte(subKey, '.'); dot != -1 { + key = fullKey[:dot+len(prefix)] + } + + if seen[key] { + continue + } + + seen[key] = true + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. + + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals = append(vals, val) + } + + if len(vals) == 0 && strCount == "1" { + // An empty set wouldn't be represented in the flatmap, so this must be + // a single empty object since the count is actually 1. + // Add an appropriately typed null value to the set. 
+ var val cty.Value + switch { + case ety.IsMapType(): + val = cty.MapValEmpty(ety) + case ety.IsListType(): + val = cty.ListValEmpty(ety) + case ety.IsSetType(): + val = cty.SetValEmpty(ety) + case ety.IsObjectType(): + // TODO: cty.ObjectValEmpty + objectMap := map[string]cty.Value{} + for attr, ty := range ety.AttributeTypes() { + objectMap[attr] = cty.NullVal(ty) + } + val = cty.ObjectVal(objectMap) + default: + val = cty.NullVal(ety) + } + vals = append(vals, val) + + } else if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go new file mode 100644 index 000000000..3403c026b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/paths.go @@ -0,0 +1,276 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff. 
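+//
+// As an illustrative sketch (the key and type here are assumed examples, not
+// taken from the upstream source): for an object type whose attribute "tags"
+// is a map(string), the flatmap key "tags.environment" resolves to
+//
+//     cty.Path{cty.GetAttrStep{Name: "tags"}, cty.IndexStep{Key: cty.StringVal("environment")}}
+//
+// while the synthetic count key "tags.%" collapses to the path of the "tags"
+// attribute itself.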
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func 
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go new file mode 100644 index 000000000..68f48da8f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl/v2" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go new file mode 100644 index 000000000..a074c749d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values.go @@ -0,0 +1,230 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
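+//
+// Illustrative sketch (the schema and values are assumed examples, not taken
+// from the upstream source): for a block schema with a string attribute
+// "name" and a NestingList block type "ingress", the object value
+//
+//     cty.ObjectVal(map[string]cty.Value{
+//         "name":    cty.StringVal("web"),
+//         "ingress": cty.ListValEmpty(cty.EmptyObject),
+//     })
+//
+// produces map[string]interface{}{"name": "web"}: the empty nested-block
+// collection is discarded, mimicking HCL1, which had no schema and therefore
+// no notion of an unspecified block.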
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go new file mode 100644 index 000000000..92f0213d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim/values_equiv.go @@ -0,0 +1,214 @@ +package hcl2shim + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. Ambiguity is most common in the presence +// of sets because in practice it is impossible to exactly correlate +// nonequal-but-equivalent set elements because they have no identity separate +// from their value. +// +// This must be used _only_ for comparing values for equivalence within the +// SDK planning code. It is only meaningful to compare the "prior state" +// provided by Terraform Core with the "planned new state" produced by the +// legacy SDK code via shims. 
In particular it is not valid to use this +// function with their the config value or the "proposed new state" value +// because they contain only the subset of data that Terraform Core itself is +// able to determine. +func ValuesSDKEquivalent(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + // We don't generally expect nils to appear, but we'll allow them + // for robustness since the data structures produced by legacy SDK code + // can sometimes be non-ideal. + return a == b // equivalent if they are _both_ nil + } + if a.RawEquals(b) { + // Easy case. We use RawEquals because we want two unknowns to be + // considered equal here, whereas "Equals" would return unknown. + return true + } + if !a.IsKnown() || !b.IsKnown() { + // Two unknown values are equivalent regardless of type. A known is + // never equivalent to an unknown. + return a.IsKnown() == b.IsKnown() + } + if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { + // Two null/zero values are equivalent regardless of type. A non-zero is + // never equivalent to a zero. + return aZero == bZero + } + + // If we get down here then we are guaranteed that both a and b are known, + // non-null values. + + aTy := a.Type() + bTy := b.Type() + switch { + case aTy.IsSetType() && bTy.IsSetType(): + return valuesSDKEquivalentSets(a, b) + case aTy.IsListType() && bTy.IsListType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsTupleType() && bTy.IsTupleType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsMapType() && bTy.IsMapType(): + return valuesSDKEquivalentMappings(a, b) + case aTy.IsObjectType() && bTy.IsObjectType(): + return valuesSDKEquivalentMappings(a, b) + case aTy == cty.Number && bTy == cty.Number: + return valuesSDKEquivalentNumbers(a, b) + default: + // We've now covered all the interesting cases, so anything that falls + // down here cannot be equivalent. + return false + } +} + +// valuesSDKEquivalentIsNullOrZero returns true if the given value is either +// null or is the "zero value" (in the SDK/Go sense) for its type. +func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { + if v == cty.NilVal { + return true + } + + ty := v.Type() + switch { + case !v.IsKnown(): + return false + case v.IsNull(): + return true + + // After this point, v is always known and non-null + case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): + return v.LengthInt() == 0 + case ty == cty.String: + return v.RawEquals(cty.StringVal("")) + case ty == cty.Number: + return v.RawEquals(cty.Zero) + case ty == cty.Bool: + return v.RawEquals(cty.False) + default: + // The above is exhaustive, but for robustness we'll consider anything + // else to _not_ be zero unless it is null. + return false + } +} + +// valuesSDKEquivalentSets returns true only if each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa. +// This is a fuzzy operation that prefers to signal non-equivalence if it cannot +// be certain that all elements are accounted for. +func valuesSDKEquivalentSets(a, b cty.Value) bool { + if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { + return false + } + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. 
+ // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. + as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if ValuesSDKEquivalent(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + for _, eq := range aeqs { + if !eq { + return false + } + } + for _, eq := range beqs { + if !eq { + return false + } + } + return true +} + +// valuesSDKEquivalentSequences decides equivalence for two sequence values +// (lists or tuples). +func valuesSDKEquivalentSequences(a, b cty.Value) bool { + as := a.AsValueSlice() + bs := b.AsValueSlice() + if len(as) != len(bs) { + return false + } + + for i := range as { + if !ValuesSDKEquivalent(as[i], bs[i]) { + return false + } + } + return true +} + +// valuesSDKEquivalentMappings decides equivalence for two mapping values +// (maps or objects). +func valuesSDKEquivalentMappings(a, b cty.Value) bool { + as := a.AsValueMap() + bs := b.AsValueMap() + if len(as) != len(bs) { + return false + } + + for k, av := range as { + bv, ok := bs[k] + if !ok { + return false + } + if !ValuesSDKEquivalent(av, bv) { + return false + } + } + return true +} + +// valuesSDKEquivalentNumbers decides equivalence for two number values based +// on the fact that the SDK uses int and float64 representations while +// cty (and thus Terraform Core) uses big.Float, and so we expect to lose +// precision in the round-trip. +// +// This does _not_ attempt to allow for an epsilon difference that may be +// caused by accumulated innacuracy in a float calculation, under the +// expectation that providers generally do not actually do compuations on +// floats and instead just pass string representations of them on verbatim +// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's +// a problem for the provider itself to deal with, based on its knowledge of +// the remote system, e.g. using DiffSuppressFunc. +func valuesSDKEquivalentNumbers(a, b cty.Value) bool { + if a.RawEquals(b) { + return true // easy + } + + af := a.AsBigFloat() + bf := b.AsBigFloat() + + if af.IsInt() != bf.IsInt() { + return false + } + if af.IsInt() && bf.IsInt() { + return false // a.RawEquals(b) test above is good enough for integers + } + + // The SDK supports only int and float64, so if it's not an integer + // we know that only a float64-level of precision can possibly be + // significant. + af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go new file mode 100644 index 000000000..78223c3b8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module.go @@ -0,0 +1,404 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Module is a container for a set of configuration constructs that are +// evaluated within a common namespace. +type Module struct { + // SourceDir is the filesystem directory that the module was loaded from. + // + // This is populated automatically only for configurations loaded with + // LoadConfigDir. 
If the parser is using a virtual filesystem then the + // path here will be in terms of that virtual filesystem. + + // Any other caller that constructs a module directly with NewModule may + // assign a suitable value to this attribute before using it for other + // purposes. It should be treated as immutable by all consumers of Module + // values. + SourceDir string + + CoreVersionConstraints []VersionConstraint + + Backend *Backend + ProviderConfigs map[string]*Provider + ProviderRequirements map[string][]VersionConstraint + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. +// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. +type File struct { + CoreVersionConstraints []VersionConstraint + + Backends []*Backend + ProviderConfigs []*Provider + ProviderRequirements []*ProviderRequirement + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderRequirements: map[string][]VersionConstraint{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) + } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + for _, constraint := range file.CoreVersionConstraints { + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. 
+ m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), + Subject: &b.DeclRange, + }) + continue + } + m.Backend = b + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + if existing, exists := m.ProviderConfigs[key]; exists { + if existing.Alias == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } + continue + } + m.ProviderConfigs[key] = pc + } + + for _, reqd := range file.ProviderRequirements { + m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) + } + + for _, v := range file.Variables { + if existing, exists := m.Variables[v.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate variable declaration", + Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &v.DeclRange, + }) + } + m.Variables[v.Name] = v + } + + for _, l := range file.Locals { + if existing, exists := m.Locals[l.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate local value definition", + Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &l.DeclRange, + }) + } + m.Locals[l.Name] = l + } + + for _, o := range file.Outputs { + if existing, exists := m.Outputs[o.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate output definition", + Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &o.DeclRange, + }) + } + m.Outputs[o.Name] = o + } + + for _, mc := range file.ModuleCalls { + if existing, exists := m.ModuleCalls[mc.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate module call", + Detail: fmt.Sprintf("An module call named %q was already defined at %s. 
Module calls must have unique names within a module.", existing.Name, existing.DeclRange), + Subject: &mc.DeclRange, + }) + } + m.ModuleCalls[mc.Name] = mc + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + if existing, exists := m.ManagedResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + } + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. + m.CoreVersionConstraints = nil + for _, constraint := range file.CoreVersionConstraints { + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) 
+ } + } + + if len(file.ProviderRequirements) != 0 { + mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) + } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) 
+ } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go new file mode 100644 index 000000000..a484ffef9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_call.go @@ -0,0 +1,188 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// ModuleCall represents a "module" block in a module or file. +type ModuleCall struct { + Name string + + SourceAddr string + SourceAddrRange hcl.Range + SourceSet bool + + Config hcl.Body + + Version VersionConstraint + + Count hcl.Expression + ForEach hcl.Expression + + Providers []PassedProviderConfig + + DependsOn []hcl.Traversal + + DeclRange hcl.Range +} + +func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { + mc := &ModuleCall{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := moduleBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, remain, diags := block.Body.PartialContent(schema) + mc.Config = remain + + if !hclsyntax.ValidIdentifier(mc.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["source"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) + diags = append(diags, valDiags...) + mc.SourceAddrRange = attr.Expr.Range() + mc.SourceSet = true + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + mc.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + if attr, exists := content.Attributes["count"]; exists { + mc.Count = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["for_each"]; exists { + mc.ForEach = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + mc.DependsOn = append(mc.DependsOn, deps...) + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["providers"]; exists { + seen := make(map[string]hcl.Range) + pairs, pDiags := hcl.ExprMap(attr.Expr) + diags = append(diags, pDiags...) 
+ for _, pair := range pairs { + key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") + diags = append(diags, keyDiags...) + value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") + diags = append(diags, valueDiags...) + if keyDiags.HasErrors() || valueDiags.HasErrors() { + continue + } + + matchKey := key.String() + if prev, exists := seen[matchKey]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider address", + Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), + Subject: pair.Value.Range().Ptr(), + }) + continue + } + + rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) + seen[matchKey] = rng + mc.Providers = append(mc.Providers, PassedProviderConfig{ + InChild: key, + InParent: value, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in module block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return mc, diags +} + +// PassedProviderConfig represents a provider config explicitly passed down to +// a child module, possibly giving it a new local address in the process. +type PassedProviderConfig struct { + InChild *ProviderConfigRef + InParent *ProviderConfigRef +} + +var moduleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + Required: true, + }, + { + Name: "version", + }, + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "depends_on", + }, + { + Name: "providers", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + // These are all reserved for future use. + {Type: "lifecycle"}, + {Type: "locals"}, + {Type: "provider", LabelNames: []string{"type"}}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go new file mode 100644 index 000000000..6fb82acfb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge.go @@ -0,0 +1,247 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// The methods in this file are used by Module.mergeFile to apply overrides +// to our different configuration elements. These methods all follow the +// pattern of mutating the receiver to incorporate settings from the parameter, +// returning error diagnostics if any aspect of the parameter cannot be merged +// into the receiver for some reason. +// +// User expectation is that anything _explicitly_ set in the given object +// should take precedence over the corresponding settings in the receiver, +// but that anything omitted in the given object should be left unchanged. +// In some cases it may be reasonable to do a "deep merge" of certain nested +// features, if it is possible to unambiguously correlate the nested elements +// and their behaviors are orthogonal to each other. 
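+//
+// Illustrative sketch of the intended override behavior (the configuration
+// below is an assumed example, not taken from the upstream source): if a
+// primary file declares
+//
+//     variable "replicas" {
+//       type    = number
+//       default = 1
+//     }
+//
+// and an override file sets only `default = 3` for the same variable, the
+// merged variable keeps the number type constraint from the base file and
+// takes the overridden default; the new default is then re-checked against
+// the retained type constraint so an incompatible override surfaces as an
+// error rather than being silently accepted.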
+ +func (p *Provider) merge(op *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if op.Version.Required != nil { + p.Version = op.Version + } + + p.Config = MergeBodies(p.Config, op.Config) + + return diags +} + +func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { + // Any provider name that's mentioned in the override gets nilled out in + // our map so that we'll rebuild it below. Any provider not mentioned is + // left unchanged. + for _, reqd := range ovrd { + delete(recv, reqd.Name) + } + for _, reqd := range ovrd { + recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) + } +} + +func (v *Variable) merge(ov *Variable) hcl.Diagnostics { + var diags hcl.Diagnostics + + if ov.DescriptionSet { + v.Description = ov.Description + v.DescriptionSet = ov.DescriptionSet + } + if ov.Default != cty.NilVal { + v.Default = ov.Default + } + if ov.Type != cty.NilType { + v.Type = ov.Type + } + if ov.ParsingMode != 0 { + v.ParsingMode = ov.ParsingMode + } + + // If the override file overrode type without default or vice-versa then + // it may have created an invalid situation, which we'll catch now by + // attempting to re-convert the value. + // + // Note that here we may be re-converting an already-converted base value + // from the base config. This will be a no-op if the type was not changed, + // but in particular might be user-observable in the edge case where the + // literal value in config could've been converted to the overridden type + // constraint but the converted value cannot. In practice, this situation + // should be rare since most of our conversions are interchangable. + if v.Default != cty.NilVal { + val, err := convert.Convert(v.Default, v.Type) + if err != nil { + // What exactly we'll say in the error message here depends on whether + // it was Default or Type that was overridden here. + switch { + case ov.Type != cty.NilType && ov.Default == cty.NilVal: + // If only the type was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), + Subject: &ov.DeclRange, + }) + case ov.Type == cty.NilType && ov.Default != cty.NilVal: + // Only the default was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + } + } else { + v.Default = val + } + } + + return diags +} + +func (l *Local) merge(ol *Local) hcl.Diagnostics { + var diags hcl.Diagnostics + + // Since a local is just a single expression in configuration, the + // override definition entirely replaces the base definition, including + // the source range so that we'll send the user to the right place if + // there is an error. 
+ l.Expr = ol.Expr + l.DeclRange = ol.DeclRange + + return diags +} + +func (o *Output) merge(oo *Output) hcl.Diagnostics { + var diags hcl.Diagnostics + + if oo.Description != "" { + o.Description = oo.Description + } + if oo.Expr != nil { + o.Expr = oo.Expr + } + if oo.SensitiveSet { + o.Sensitive = oo.Sensitive + o.SensitiveSet = oo.SensitiveSet + } + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(oo.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { + var diags hcl.Diagnostics + + if omc.SourceSet { + mc.SourceAddr = omc.SourceAddr + mc.SourceAddrRange = omc.SourceAddrRange + mc.SourceSet = omc.SourceSet + } + + if omc.Count != nil { + mc.Count = omc.Count + } + + if omc.ForEach != nil { + mc.ForEach = omc.ForEach + } + + if len(omc.Version.Required) != 0 { + mc.Version = omc.Version + } + + mc.Config = MergeBodies(mc.Config, omc.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(mc.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (r *Resource) merge(or *Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + + if r.Mode != or.Mode { + // This is always a programming error, since managed and data resources + // are kept in separate maps in the configuration structures. + panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) + } + + if or.Count != nil { + r.Count = or.Count + } + if or.ForEach != nil { + r.ForEach = or.ForEach + } + if or.ProviderConfigRef != nil { + r.ProviderConfigRef = or.ProviderConfigRef + } + if r.Mode == addrs.ManagedResourceMode { + // or.Managed is always non-nil for managed resource mode + + if or.Managed.Connection != nil { + r.Managed.Connection = or.Managed.Connection + } + if or.Managed.CreateBeforeDestroySet { + r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy + r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet + } + if len(or.Managed.IgnoreChanges) != 0 { + r.Managed.IgnoreChanges = or.Managed.IgnoreChanges + } + if or.Managed.PreventDestroySet { + r.Managed.PreventDestroy = or.Managed.PreventDestroy + r.Managed.PreventDestroySet = or.Managed.PreventDestroySet + } + if len(or.Managed.Provisioners) != 0 { + r.Managed.Provisioners = or.Managed.Provisioners + } + } + + r.Config = MergeBodies(r.Config, or.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. 
+ if len(or.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go new file mode 100644 index 000000000..7b51eae85 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/module_merge_body.go @@ -0,0 +1,143 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +// MergeBodies creates a new HCL body that contains a combination of the +// given base and override bodies. Attributes and blocks defined in the +// override body take precedence over those of the same name defined in +// the base body. +// +// If any block of a particular type appears in "override" then it will +// replace _all_ of the blocks of the same type in "base" in the new +// body. +func MergeBodies(base, override hcl.Body) hcl.Body { + return mergeBody{ + Base: base, + Override: override, + } +} + +// mergeBody is a hcl.Body implementation that wraps a pair of other bodies +// and allows attributes and blocks within the override to take precedence +// over those defined in the base body. +// +// This is used to deal with dynamically-processed bodies in Module.mergeFile. +// It uses a shallow-only merging strategy where direct attributes defined +// in Override will override attributes of the same name in Base, while any +// blocks defined in Override will hide all blocks of the same type in Base. +// +// This cannot possibly "do the right thing" in all cases, because we don't +// have enough information about user intent. However, this behavior is intended +// to be reasonable for simple overriding use-cases. +type mergeBody struct { + Base hcl.Body + Override hcl.Body +} + +var _ hcl.Body = mergeBody{} + +func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, _, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + return content, diags +} + +func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + remain := MergeBodies(baseRemain, overrideRemain) + + return content, remain, diags +} + +func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + } + + // For attributes we just assign from each map in turn and let the override + // map clobber any matching entries from base. 
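The attribute rule just described (copy the base attributes first, then let the override clobber any attribute of the same name) can be reproduced with nothing but the public hcl API. A standalone sketch, separate from the vendored code, with throwaway filenames:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

// parseBody parses a small HCL snippet, panicking on error for brevity.
func parseBody(src, filename string) hcl.Body {
    f, diags := hclsyntax.ParseConfig([]byte(src), filename, hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }
    return f.Body
}

func mustAttrs(b hcl.Body) hcl.Attributes {
    attrs, diags := b.JustAttributes()
    if diags.HasErrors() {
        panic(diags.Error())
    }
    return attrs
}

func main() {
    base := parseBody("ami = \"ami-base\"\ninstance_type = \"t2.micro\"\n", "base.tf")
    override := parseBody("ami = \"ami-override\"\n", "example_override.tf")

    // Same rule as mergeBody: base attributes first, then the override
    // clobbers any entry with a matching name.
    merged := make(hcl.Attributes)
    for name, attr := range mustAttrs(base) {
        merged[name] = attr
    }
    for name, attr := range mustAttrs(override) {
        merged[name] = attr
    }

    for name, attr := range merged {
        val, _ := attr.Expr.Value(nil)
        fmt.Printf("%s = %s\n", name, val.AsString())
    }
    // ami is taken from the override; instance_type survives from the base.
}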
+ for k, a := range base.Attributes { + content.Attributes[k] = a + } + for k, a := range override.Attributes { + content.Attributes[k] = a + } + + // Things are a little more interesting for blocks because they arrive + // as a flat list. Our merging semantics call for us to suppress blocks + // from base if at least one block of the same type appears in override. + // We explicitly do not try to correlate and deeply merge nested blocks, + // since we don't have enough context here to infer user intent. + + overriddenBlockTypes := make(map[string]bool) + for _, block := range override.Blocks { + if block.Type == "dynamic" { + overriddenBlockTypes[block.Labels[0]] = true + continue + } + overriddenBlockTypes[block.Type] = true + } + for _, block := range base.Blocks { + // We skip over dynamic blocks whose type label is an overridden type + // but note that below we do still leave them as dynamic blocks in + // the result because expanding the dynamic blocks that are left is + // done much later during the core graph walks, where we can safely + // evaluate the expressions. + if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { + continue + } + if overriddenBlockTypes[block.Type] { + continue + } + content.Blocks = append(content.Blocks, block) + } + for _, block := range override.Blocks { + content.Blocks = append(content.Blocks, block) + } + + return content +} + +func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := make(hcl.Attributes) + + baseAttrs, aDiags := b.Base.JustAttributes() + diags = append(diags, aDiags...) + overrideAttrs, aDiags := b.Override.JustAttributes() + diags = append(diags, aDiags...) + + for k, a := range baseAttrs { + ret[k] = a + } + for k, a := range overrideAttrs { + ret[k] = a + } + + return ret, diags +} + +func (b mergeBody) MissingItemRange() hcl.Range { + return b.Base.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go new file mode 100644 index 000000000..8c8398e0b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/named_values.go @@ -0,0 +1,354 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/typeexpr" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// A consistent detail message for all "not a valid identifier" diagnostics. +const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." + +// Variable represents a "variable" block in a module or file. +type Variable struct { + Name string + Description string + Default cty.Value + Type cty.Type + ParsingMode VariableParsingMode + + DescriptionSet bool + + DeclRange hcl.Range +} + +func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { + v := &Variable{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + // Unless we're building an override, we'll set some defaults + // which we might override with attributes below. We leave these + // as zero-value in the override case so we can recognize whether + // or not they are set when we merge. 
+ if !override { + v.Type = cty.DynamicPseudoType + v.ParsingMode = VariableParseLiteral + } + + content, diags := block.Body.Content(variableBlockSchema) + + if !hclsyntax.ValidIdentifier(v.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + // Don't allow declaration of variables that would conflict with the + // reserved attribute and block type names in a "module" block, since + // these won't be usable for child modules. + for _, attr := range moduleBlockSchema.Attributes { + if attr.Name == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), + Subject: &block.LabelRanges[0], + }) + } + } + for _, blockS := range moduleBlockSchema.Blocks { + if blockS.Type == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), + Subject: &block.LabelRanges[0], + }) + } + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) + diags = append(diags, valDiags...) + v.DescriptionSet = true + } + + if attr, exists := content.Attributes["type"]; exists { + ty, parseMode, tyDiags := decodeVariableType(attr.Expr) + diags = append(diags, tyDiags...) + v.Type = ty + v.ParsingMode = parseMode + } + + if attr, exists := content.Attributes["default"]; exists { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + + // Convert the default to the expected type so we can catch invalid + // defaults early and allow later code to assume validity. + // Note that this depends on us having already processed any "type" + // attribute above. + // However, we can't do this if we're in an override file where + // the type might not be set; we'll catch that during merge. + if v.Type != cty.NilType { + var err error + val, err = convert.Convert(val, v.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), + Subject: attr.Expr.Range().Ptr(), + }) + val = cty.DynamicVal + } + } + + v.Default = val + } + + return v, diags +} + +func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { + if exprIsNativeQuotedString(expr) { + // Here we're accepting the pre-0.12 form of variable type argument where + // the string values "string", "list" and "map" are accepted has a hint + // about the type used primarily for deciding how to parse values + // given on the command line and in environment variables. + // Only the native syntax ends up in this codepath; we handle the + // JSON syntax (which is, of course, quoted even in the new format) + // in the normal codepath below. 
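The variable type handling here leans on two public helpers: hcl.ExprAsKeyword recognizes the bare list/map shorthands handled just below, and typeexpr.TypeConstraint parses full type expressions such as list(string). A short standalone sketch, independent of the vendored decodeVariableType:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/ext/typeexpr"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    // Modern form: a bare type expression such as list(string).
    expr, diags := hclsyntax.ParseExpression([]byte("list(string)"), "variables.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }
    ty, tyDiags := typeexpr.TypeConstraint(expr)
    if tyDiags.HasErrors() {
        panic(tyDiags.Error())
    }
    fmt.Println(ty.FriendlyName()) // list of string

    // Bare "list" and "map" keywords are shorthand for list(any) and map(any).
    kw, _ := hclsyntax.ParseExpression([]byte("list"), "variables.tf", hcl.Pos{Line: 1, Column: 1})
    fmt.Println(hcl.ExprAsKeyword(kw)) // list
}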
+ val, diags := expr.Value(nil)
+ if diags.HasErrors() {
+ return cty.DynamicPseudoType, VariableParseHCL, diags
+ }
+ str := val.AsString()
+ switch str {
+ case "string":
+ return cty.String, VariableParseLiteral, diags
+ case "list":
+ return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags
+ case "map":
+ return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags
+ default:
+ return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{
+ Severity: hcl.DiagError,
+ Summary: "Invalid legacy variable type hint",
+ Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`,
+ Subject: expr.Range().Ptr(),
+ }}
+ }
+ }
+
+ // First we'll deal with some shorthand forms that the HCL-level type
+ // expression parser doesn't include. These both emulate pre-0.12 behavior
+ // of allowing a list or map of any element type as long as all of the
+ // elements are consistent. This is the same as list(any) or map(any).
+ switch hcl.ExprAsKeyword(expr) {
+ case "list":
+ return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil
+ case "map":
+ return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil
+ }
+
+ ty, diags := typeexpr.TypeConstraint(expr)
+ if diags.HasErrors() {
+ return cty.DynamicPseudoType, VariableParseHCL, diags
+ }
+
+ switch {
+ case ty.IsPrimitiveType():
+ // Primitive types use literal parsing.
+ return ty, VariableParseLiteral, diags
+ default:
+ // Everything else uses HCL parsing
+ return ty, VariableParseHCL, diags
+ }
+}
+
+// VariableParsingMode defines how values of a particular variable given by
+// text-only mechanisms (command line arguments and environment variables)
+// should be parsed to produce the final value.
+type VariableParsingMode rune
+
+// VariableParseLiteral is a variable parsing mode that just takes the given
+// string directly as a cty.String value.
+const VariableParseLiteral VariableParsingMode = 'L'
+
+// VariableParseHCL is a variable parsing mode that attempts to parse the given
+// string as an HCL expression and returns the result.
+const VariableParseHCL VariableParsingMode = 'H'
+
+// Parse uses the receiving parsing mode to process the given variable value
+// string, returning the result along with any diagnostics.
+//
+// A VariableParsingMode does not know the expected type of the corresponding
+// variable, so it's the caller's responsibility to attempt to convert the
+// result to the appropriate type and return to the user any diagnostics that
+// conversion may produce.
+//
+// The given name is used to create a synthetic filename in case any diagnostics
+// must be generated about the given string value. This should be the name
+// of the root module variable whose value will be populated from the given
+// string.
+//
+// If the returned diagnostics has errors, the returned value may not be
+// valid.
+func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) {
+ switch m {
+ case VariableParseLiteral:
+ return cty.StringVal(value), nil
+ case VariableParseHCL:
+ fakeFilename := fmt.Sprintf("<value for var.%s>", name)
+ expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1})
+ if diags.HasErrors() {
+ return cty.DynamicVal, diags
+ }
+ val, valDiags := expr.Value(nil)
+ diags = append(diags, valDiags...)
+ return val, diags + default: + // Should never happen + panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) + } +} + +// Output represents an "output" block in a module or file. +type Output struct { + Name string + Description string + Expr hcl.Expression + DependsOn []hcl.Traversal + Sensitive bool + + DescriptionSet bool + SensitiveSet bool + + DeclRange hcl.Range +} + +func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { + o := &Output{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := outputBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, diags := block.Body.Content(schema) + + if !hclsyntax.ValidIdentifier(o.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid output name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) + diags = append(diags, valDiags...) + o.DescriptionSet = true + } + + if attr, exists := content.Attributes["value"]; exists { + o.Expr = attr.Expr + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) + diags = append(diags, valDiags...) + o.SensitiveSet = true + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + o.DependsOn = append(o.DependsOn, deps...) + } + + return o, diags +} + +// Local represents a single entry from a "locals" block in a module or file. +// The "locals" block itself is not represented, because it serves only to +// provide context for us to interpret its contents. +type Local struct { + Name string + Expr hcl.Expression + + DeclRange hcl.Range +} + +func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if len(attrs) == 0 { + return nil, diags + } + + locals := make([]*Local, 0, len(attrs)) + for name, attr := range attrs { + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local value name", + Detail: badIdentifierDetail, + Subject: &attr.NameRange, + }) + } + + locals = append(locals, &Local{ + Name: name, + Expr: attr.Expr, + DeclRange: attr.Range, + }) + } + return locals, diags +} + +var variableBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "default", + }, + { + Name: "type", + }, + }, +} + +var outputBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "value", + Required: true, + }, + { + Name: "depends_on", + }, + { + Name: "sensitive", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go new file mode 100644 index 000000000..2a621b577 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser.go @@ -0,0 +1,100 @@ +package configs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclparse" + "github.com/spf13/afero" +) + +// Parser is the main interface to read configuration files and other related +// files from disk. 
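The Parser type introduced here wraps two pieces of public plumbing: an afero filesystem, so callers and tests can substitute an in-memory tree, and an hclparse.Parser, which caches source bytes so diagnostics can quote the offending lines. A standalone sketch of that plumbing (the main.tf content is invented):

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2/hclparse"
    "github.com/spf13/afero"
)

func main() {
    // An in-memory filesystem stands in for the real one, just as a non-nil
    // afero.Fs passed to NewParser would.
    fs := afero.Afero{Fs: afero.NewMemMapFs()}
    if err := fs.WriteFile("main.tf", []byte("locals {\n  greeting = \"hello\"\n}\n"), 0644); err != nil {
        panic(err)
    }

    src, err := fs.ReadFile("main.tf")
    if err != nil {
        panic(err)
    }

    // hclparse.Parser keeps the raw bytes of every parsed file, which is what
    // backs the Sources method on the wrapping Parser.
    p := hclparse.NewParser()
    f, diags := p.ParseHCL(src, "main.tf")
    if diags.HasErrors() {
        panic(diags.Error())
    }
    fmt.Printf("parsed body ok: %t, cached bytes: %d\n", f.Body != nil, len(p.Sources()["main.tf"]))
}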
+// +// It retains a cache of all files that are loaded so that they can be used +// to create source code snippets in diagnostics, etc. +type Parser struct { + fs afero.Afero + p *hclparse.Parser +} + +// NewParser creates and returns a new Parser that reads files from the given +// filesystem. If a nil filesystem is passed then the system's "real" filesystem +// will be used, via afero.OsFs. +func NewParser(fs afero.Fs) *Parser { + if fs == nil { + fs = afero.OsFs{} + } + + return &Parser{ + fs: afero.Afero{Fs: fs}, + p: hclparse.NewParser(), + } +} + +// LoadHCLFile is a low-level method that reads the file at the given path, +// parses it, and returns the hcl.Body representing its root. In many cases +// it is better to use one of the other Load*File methods on this type, +// which additionally decode the root body in some way and return a higher-level +// construct. +// +// If the file cannot be read at all -- e.g. because it does not exist -- then +// this method will return a nil body and error diagnostics. In this case +// callers may wish to ignore the provided error diagnostics and produce +// a more context-sensitive error instead. +// +// The file will be parsed using the HCL native syntax unless the filename +// ends with ".json", in which case the HCL JSON syntax will be used. +func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { + src, err := p.fs.ReadFile(path) + + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The file %q could not be read.", path), + }, + } + } + + var file *hcl.File + var diags hcl.Diagnostics + switch { + case strings.HasSuffix(path, ".json"): + file, diags = p.p.ParseJSON(src, path) + default: + file, diags = p.p.ParseHCL(src, path) + } + + // If the returned file or body is nil, then we'll return a non-nil empty + // body so we'll meet our contract that nil means an error reading the file. + if file == nil || file.Body == nil { + return hcl.EmptyBody(), diags + } + + return file.Body, diags +} + +// Sources returns a map of the cached source buffers for all files that +// have been loaded through this parser, with source filenames (as requested +// when each file was opened) as the keys. +func (p *Parser) Sources() map[string][]byte { + return p.p.Sources() +} + +// ForceFileSource artificially adds source code to the cache of file sources, +// as if it had been loaded from the given filename. +// +// This should be used only in special situations where configuration is loaded +// some other way. Most callers should load configuration via methods of +// Parser, which will update the sources cache automatically. +func (p *Parser) ForceFileSource(filename string, src []byte) { + // We'll make a synthetic hcl.File here just so we can reuse the + // existing cache. + p.p.AddFile(filename, &hcl.File{ + Body: hcl.EmptyBody(), + Bytes: src, + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go new file mode 100644 index 000000000..d4cbc945c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config.go @@ -0,0 +1,247 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" +) + +// LoadConfigFile reads the file at the given path and parses it as a config +// file. 
+// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil *File will be returned along with error diagnostics. Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil map is returned +// then the map may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, false) +} + +// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes +// certain required attribute constraints in order to interpret the given +// file as an overrides file. +func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, true) +} + +func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { + + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + file := &File{} + + var reqDiags hcl.Diagnostics + file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) + diags = append(diags, reqDiags...) + + content, contentDiags := body.Content(configFileSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, contentDiags := block.Body.Content(terraformBlockSchema) + diags = append(diags, contentDiags...) + + // We ignore the "terraform_version" attribute here because + // sniffCoreVersionRequirements already dealt with that above. + + for _, innerBlock := range content.Blocks { + switch innerBlock.Type { + + case "backend": + backendCfg, cfgDiags := decodeBackendBlock(innerBlock) + diags = append(diags, cfgDiags...) + if backendCfg != nil { + file.Backends = append(file.Backends, backendCfg) + } + + case "required_providers": + reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) + diags = append(diags, reqsDiags...) + file.ProviderRequirements = append(file.ProviderRequirements, reqs...) + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + case "provider": + cfg, cfgDiags := decodeProviderBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ProviderConfigs = append(file.ProviderConfigs, cfg) + } + + case "variable": + cfg, cfgDiags := decodeVariableBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Variables = append(file.Variables, cfg) + } + + case "locals": + defs, defsDiags := decodeLocalsBlock(block) + diags = append(diags, defsDiags...) + file.Locals = append(file.Locals, defs...) + + case "output": + cfg, cfgDiags := decodeOutputBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Outputs = append(file.Outputs, cfg) + } + + case "module": + cfg, cfgDiags := decodeModuleBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ModuleCalls = append(file.ModuleCalls, cfg) + } + + case "resource": + cfg, cfgDiags := decodeResourceBlock(block) + diags = append(diags, cfgDiags...) 
+ if cfg != nil { + file.ManagedResources = append(file.ManagedResources, cfg) + } + + case "data": + cfg, cfgDiags := decodeDataBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.DataResources = append(file.DataResources, cfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + return file, diags +} + +// sniffCoreVersionRequirements does minimal parsing of the given body for +// "terraform" blocks with "required_version" attributes, returning the +// requirements found. +// +// This is intended to maximize the chance that we'll be able to read the +// requirements (syntax errors notwithstanding) even if the config file contains +// constructs that might've been added in future Terraform versions +// +// This is a "best effort" sort of method which will return constraints it is +// able to find, but may return no constraints at all if the given body is +// so invalid that it cannot be decoded at all. +func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) + + var constraints []VersionConstraint + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) + diags = append(diags, blockDiags...) + + attr, exists := content.Attributes["required_version"] + if !exists { + continue + } + + constraint, constraintDiags := decodeVersionConstraint(attr) + diags = append(diags, constraintDiags...) + if !constraintDiags.HasErrors() { + constraints = append(constraints, constraint) + } + } + + return constraints, diags +} + +// configFileSchema is the schema for the top-level of a config file. We use +// the low-level HCL API for this level so we can easily deal with each +// block type separately with its own decoding logic. +var configFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "locals", + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + }, +} + +// terraformBlockSchema is the schema for a top-level "terraform" block in +// a configuration file. 
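sniffCoreVersionRequirements above works because PartialContent, unlike Content, tolerates blocks and attributes its schema does not mention. A standalone sketch of the same best-effort pattern, using only the public hcl API and an invented future block type:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    // The config uses a block type the schema does not know about;
    // PartialContent ignores it instead of failing.
    src := `
terraform {
  required_version = ">= 0.12"
}

some_future_block "x" {
  whatever = true
}
`
    f, diags := hclsyntax.ParseConfig([]byte(src), "main.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    rootSchema := &hcl.BodySchema{
        Blocks: []hcl.BlockHeaderSchema{{Type: "terraform"}},
    }
    root, _, rootDiags := f.Body.PartialContent(rootSchema)
    if rootDiags.HasErrors() {
        panic(rootDiags.Error())
    }

    blockSchema := &hcl.BodySchema{
        Attributes: []hcl.AttributeSchema{{Name: "required_version"}},
    }
    for _, block := range root.Blocks {
        content, _, _ := block.Body.PartialContent(blockSchema)
        if attr, ok := content.Attributes["required_version"]; ok {
            val, _ := attr.Expr.Value(nil)
            fmt.Println("required_version:", val.AsString())
        }
    }
}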
+var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "backend", + LabelNames: []string{"type"}, + }, + { + Type: "required_providers", + }, + }, +} + +// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffRootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + }, +} + +// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go new file mode 100644 index 000000000..afdd69833 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_config_dir.go @@ -0,0 +1,163 @@ +package configs + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// LoadConfigDir reads the .tf and .tf.json files in the given directory +// as config files (using LoadConfigFile) and then combines these files into +// a single Module. +// +// If this method returns nil, that indicates that the given directory does not +// exist at all or could not be opened for some reason. Callers may wish to +// detect this case and ignore the returned diagnostics so that they can +// produce a more context-aware error message in that case. +// +// If this method returns a non-nil module while error diagnostics are returned +// then the module may be incomplete but can be used carefully for static +// analysis. +// +// This file does not consider a directory with no files to be an error, and +// will simply return an empty module in that case. Callers should first call +// Parser.IsConfigDir if they wish to recognize that situation. +// +// .tf files are parsed using the HCL native syntax while .tf.json files are +// parsed using the HCL JSON syntax. +func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { + primaryPaths, overridePaths, diags := p.dirFiles(path) + if diags.HasErrors() { + return nil, diags + } + + primary, fDiags := p.loadFiles(primaryPaths, false) + diags = append(diags, fDiags...) + override, fDiags := p.loadFiles(overridePaths, true) + diags = append(diags, fDiags...) + + mod, modDiags := NewModule(primary, override) + diags = append(diags, modDiags...) + + mod.SourceDir = path + + return mod, diags +} + +// ConfigDirFiles returns lists of the primary and override files configuration +// files in the given directory. +// +// If the given directory does not exist or cannot be read, error diagnostics +// are returned. If errors are returned, the resulting lists may be incomplete. +func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { + return p.dirFiles(dir) +} + +// IsConfigDir determines whether the given path refers to a directory that +// exists and contains at least one Terraform config file (with a .tf or +// .tf.json extension.) 
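dirFiles below applies a few naming rules when scanning a module directory: only .tf and .tf.json files count, hidden and editor-backup files are skipped, and a base name of "override" or an "_override" suffix routes a file into the override set. A standalone sketch of the same classification (file names invented):

package main

import (
    "fmt"
    "strings"
)

// classify mirrors the rules in dirFiles, fileExt, and IsIgnoredFile.
func classify(name string) string {
    ext := ""
    switch {
    case strings.HasSuffix(name, ".tf.json"):
        ext = ".tf.json"
    case strings.HasSuffix(name, ".tf"):
        ext = ".tf"
    }
    ignored := strings.HasPrefix(name, ".") || strings.HasSuffix(name, "~") ||
        (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#"))
    if ext == "" || ignored {
        return "ignored"
    }
    base := strings.TrimSuffix(name, ext)
    if base == "override" || strings.HasSuffix(base, "_override") {
        return "override"
    }
    return "primary"
}

func main() {
    for _, n := range []string{"main.tf", "prod_override.tf", "override.tf.json", "main.tf~", ".hidden.tf", "notes.txt"} {
        fmt.Printf("%-20s %s\n", n, classify(n))
    }
}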
+func (p *Parser) IsConfigDir(path string) bool { + primaryPaths, overridePaths, _ := p.dirFiles(path) + return (len(primaryPaths) + len(overridePaths)) > 0 +} + +func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { + var files []*File + var diags hcl.Diagnostics + + for _, path := range paths { + var f *File + var fDiags hcl.Diagnostics + if override { + f, fDiags = p.LoadConfigFileOverride(path) + } else { + f, fDiags = p.LoadConfigFile(path) + } + diags = append(diags, fDiags...) + if f != nil { + files = append(files, f) + } + } + + return files, diags +} + +func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { + infos, err := p.fs.ReadDir(dir) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read module directory", + Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), + }) + return + } + + for _, info := range infos { + if info.IsDir() { + // We only care about files + continue + } + + name := info.Name() + ext := fileExt(name) + if ext == "" || IsIgnoredFile(name) { + continue + } + + baseName := name[:len(name)-len(ext)] // strip extension + isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") + + fullPath := filepath.Join(dir, name) + if isOverride { + override = append(override, fullPath) + } else { + primary = append(primary, fullPath) + } + } + + return +} + +// fileExt returns the Terraform configuration extension of the given +// path, or a blank string if it is not a recognized extension. +func fileExt(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +// IsIgnoredFile returns true if the given filename (which must not have a +// directory path ahead of it) should be ignored as e.g. an editor swap file. +func IsIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} + +// IsEmptyDir returns true if the given filesystem path contains no Terraform +// configuration files. +// +// Unlike the methods of the Parser type, this function always consults the +// real filesystem, and thus it isn't appropriate to use when working with +// configuration loaded from a plan file. +func IsEmptyDir(path string) (bool, error) { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + return true, nil + } + + p := NewParser(nil) + fs, os, err := p.dirFiles(path) + if err != nil { + return false, err + } + + return len(fs) == 0 && len(os) == 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go new file mode 100644 index 000000000..10d98e5b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/parser_values.go @@ -0,0 +1,43 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" +) + +// LoadValuesFile reads the file at the given path and parses it as a "values +// file", which is an HCL config file whose top-level attributes are treated +// as arbitrary key.value pairs. +// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil map will be returned along with error diagnostics. 
Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil map is returned +// then the map may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + vals := make(map[string]cty.Value) + attrs, attrDiags := body.JustAttributes() + diags = append(diags, attrDiags...) + if attrs == nil { + return vals, diags + } + + for name, attr := range attrs { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + vals[name] = val + } + + return vals, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go new file mode 100644 index 000000000..cb9ba1f3f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provider.go @@ -0,0 +1,144 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Provider represents a "provider" block in a module or file. A provider +// block is a provider configuration, and there can be zero or more +// configurations for each actual provider. +type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + content, config, diags := block.Body.PartialContent(providerBlockSchema) + + provider := &Provider{ + Name: block.Labels[0], + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) 
+ } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. +func (p *Provider) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ProviderRequirement represents a declaration of a dependency on a particular +// provider version without actually configuring that provider. This is used in +// child modules that expect a provider to be passed in from their parent. +type ProviderRequirement struct { + Name string + Requirement VersionConstraint +} + +func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + var reqs []*ProviderRequirement + for name, attr := range attrs { + req, reqDiags := decodeVersionConstraint(attr) + diags = append(diags, reqDiags...) + if !diags.HasErrors() { + reqs = append(reqs, &ProviderRequirement{ + Name: name, + Requirement: req, + }) + } + } + return reqs, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + // _All_ of these are reserved for future expansion. + {Type: "lifecycle"}, + {Type: "locals"}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go new file mode 100644 index 000000000..47b656791 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioner.go @@ -0,0 +1,150 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" +) + +// Provisioner represents a "provisioner" block when used within a +// "resource" block in a module or file. 
+type Provisioner struct { + Type string + Config hcl.Body + Connection *Connection + When ProvisionerWhen + OnFailure ProvisionerOnFailure + + DeclRange hcl.Range + TypeRange hcl.Range +} + +func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { + pv := &Provisioner{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + DeclRange: block.DefRange, + When: ProvisionerWhenCreate, + OnFailure: ProvisionerOnFailureFail, + } + + content, config, diags := block.Body.PartialContent(provisionerBlockSchema) + pv.Config = config + + if attr, exists := content.Attributes["when"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "create": + pv.When = ProvisionerWhenCreate + case "destroy": + pv.When = ProvisionerWhenDestroy + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"when\" keyword", + Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", + Subject: expr.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["on_failure"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "continue": + pv.OnFailure = ProvisionerOnFailureContinue + case "fail": + pv.OnFailure = ProvisionerOnFailureFail + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"on_failure\" keyword", + Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + //conn, connDiags := decodeConnectionBlock(block) + //diags = append(diags, connDiags...) + pv.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provisioner block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return pv, diags +} + +// Connection represents a "connection" block when used within either a +// "resource" or "provisioner" block in a module or file. +type Connection struct { + Config hcl.Body + + DeclRange hcl.Range +} + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerWhen + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. 
+type ProvisionerOnFailure int + +//go:generate go run golang.org/x/tools/cmd/stringer -type ProvisionerOnFailure + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when"}, + {Name: "on_failure"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "connection"}, + {Type: "lifecycle"}, // reserved for future use + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go new file mode 100644 index 000000000..7ff5a6e00 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisioneronfailure_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ProvisionerOnFailureInvalid-0] + _ = x[ProvisionerOnFailureContinue-1] + _ = x[ProvisionerOnFailureFail-2] +} + +const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" + +var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} + +func (i ProvisionerOnFailure) String() string { + if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { + return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go new file mode 100644 index 000000000..9f21b3ac6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/provisionerwhen_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ProvisionerWhenInvalid-0] + _ = x[ProvisionerWhenCreate-1] + _ = x[ProvisionerWhenDestroy-2] +} + +const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" + +var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} + +func (i ProvisionerWhen) String() string { + if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { + return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go new file mode 100644 index 000000000..cd9991a38 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/resource.go @@ -0,0 +1,490 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclsyntax" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Resource represents a "resource" or "data" block in a module or file. +type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + + DependsOn []hcl.Traversal + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. +type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration +// that should be used for this resource. This function implements the +// default behavior of extracting the type from the resource type name if +// an explicit "provider" argument was not provided. 
+func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { + if r.ProviderConfigRef == nil { + return r.Addr().DefaultProviderConfig() + } + + return addrs.ProviderConfig{ + Type: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.ManagedResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + Managed: &ManagedResource{}, + } + + content, remain, diags := block.Body.PartialContent(resourceBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same resource block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + var seenLifecycle *hcl.Block + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) + diags = append(diags, valDiags...) + r.Managed.CreateBeforeDestroySet = true + } + + if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) + diags = append(diags, valDiags...) + r.Managed.PreventDestroySet = true + } + + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { + + // ignore_changes can either be a list of relative traversals + // or it can be just the keyword "all" to ignore changes to this + // resource entirely. 
+ // ignore_changes = [ami, instance_type] + // ignore_changes = all + // We also allow two legacy forms for compatibility with earlier + // versions: + // ignore_changes = ["ami", "instance_type"] + // ignore_changes = ["*"] + + kw := hcl.ExprAsKeyword(attr.Expr) + + switch { + case kw == "all": + r.Managed.IgnoreAllChanges = true + default: + exprs, listDiags := hcl.ExprList(attr.Expr) + diags = append(diags, listDiags...) + + var ignoreAllRange hcl.Range + + for _, expr := range exprs { + + // our expr might be the literal string "*", which + // we accept as a deprecated way of saying "all". + if shimIsIgnoreChangesStar(expr) { + r.Managed.IgnoreAllChanges = true + ignoreAllRange = expr.Range() + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Deprecated ignore_changes wildcard", + Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.RelTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) + } + } + + if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes ruleset", + Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", + Subject: &ignoreAllRange, + Context: attr.Expr.Range().Ptr(), + }) + } + + } + + } + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + r.Managed.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + case "provisioner": + pv, pvDiags := decodeProvisionerBlock(block) + diags = append(diags, pvDiags...) + if pv != nil { + r.Managed.Provisioners = append(r.Managed.Provisioners, pv) + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. 
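The ignore_changes handling earlier in this function accepts both a list of relative traversals and the bare keyword all; the public helpers doing the work are hcl.ExprList and hcl.RelTraversalForExpr, with hcl.ExprAsKeyword for the keyword form. A standalone sketch, separate from the vendored code:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    // ignore_changes = [ami, instance_type] arrives as a tuple of traversals.
    expr, diags := hclsyntax.ParseExpression([]byte("[ami, instance_type]"), "main.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }
    exprs, listDiags := hcl.ExprList(expr)
    if listDiags.HasErrors() {
        panic(listDiags.Error())
    }
    for _, e := range exprs {
        traversal, travDiags := hcl.RelTraversalForExpr(e)
        if travDiags.HasErrors() {
            panic(travDiags.Error())
        }
        fmt.Printf("traversal with %d step(s)\n", len(traversal))
    }

    // ignore_changes = all arrives as a bare keyword instead.
    kw, _ := hclsyntax.ParseExpression([]byte("all"), "main.tf", hcl.Pos{Line: 1, Column: 1})
    fmt.Println(hcl.ExprAsKeyword(kw)) // all
}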
+ diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in resource block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.DataResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + } + + content, remain, diags := block.Body.PartialContent(dataBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // Cannot have count and for_each on the same data block + if r.Count != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid combination of "count" and "for_each"`, + Detail: `The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`, + Subject: &attr.NameRange, + }) + } + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + for _, block := range content.Blocks { + // All of the block types we accept are just reserved for future use, but some get a specialized error message. + switch block.Type { + case "lifecycle": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported lifecycle block", + Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", + Subject: &block.DefRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in data block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +type ProviderConfigRef struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if alias not set +} + +func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var shimDiags hcl.Diagnostics + expr, shimDiags = shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + + // AbsTraversalForExpr produces only generic errors, so we'll discard + // the errors given and produce our own with extra context. If we didn't + // get any errors then we might still have warnings, though. 
+ if !travDiags.HasErrors() { + diags = append(diags, travDiags...) + } + + if len(traversal) < 1 || len(traversal) > 2 { + // A provider reference was given as a string literal in the legacy + // configuration language and there are lots of examples out there + // showing that usage, so we'll sniff for that situation here and + // produce a specialized error message for it to help users find + // the new correct form. + if exprIsNativeQuotedString(expr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "A provider configuration reference must not be given in quotes.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := &ProviderConfigRef{ + Name: traversal.RootName(), + NameRange: traversal[0].SourceRange(), + } + + if len(traversal) > 1 { + aliasStep, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", + Subject: traversal[1].SourceRange().Ptr(), + }) + return ret, diags + } + + ret.Alias = aliasStep.Name + ret.AliasRange = aliasStep.SourceRange().Ptr() + } + + return ret, diags +} + +// Addr returns the provider config address corresponding to the receiving +// config reference. +// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. 
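decodeProviderConfigRef above ultimately reduces a provider argument such as aws.west to a provider name plus an optional alias via an HCL traversal. A standalone sketch of that reduction with the public API:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclsyntax"
)

func main() {
    // provider = aws.west  ->  provider type "aws", configuration alias "west"
    expr, diags := hclsyntax.ParseExpression([]byte("aws.west"), "main.tf", hcl.Pos{Line: 1, Column: 1})
    if diags.HasErrors() {
        panic(diags.Error())
    }

    traversal, travDiags := hcl.AbsTraversalForExpr(expr)
    if travDiags.HasErrors() {
        panic(travDiags.Error())
    }

    name := traversal.RootName()
    alias := ""
    if len(traversal) > 1 {
        if step, ok := traversal[1].(hcl.TraverseAttr); ok {
            alias = step.Name
        }
    }
    fmt.Println(name, alias) // aws west
}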
+func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: r.Name, + Alias: r.Alias, + } +} + +func (r *ProviderConfigRef) String() string { + if r == nil { + return "" + } + if r.Alias != "" { + return fmt.Sprintf("%s.%s", r.Name, r.Alias) + } + return r.Name +} + +var commonResourceAttributes = []hcl.AttributeSchema{ + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "provider", + }, + { + Name: "depends_on", + }, +} + +var resourceBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "locals"}, // reserved for future use + {Type: "lifecycle"}, + {Type: "connection"}, + {Type: "provisioner", LabelNames: []string{"type"}}, + }, +} + +var dataBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "lifecycle"}, // reserved for future use + {Type: "locals"}, // reserved for future use + }, +} + +var resourceLifecycleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "create_before_destroy", + }, + { + Name: "prevent_destroy", + }, + { + Name: "ignore_changes", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go new file mode 100644 index 000000000..cd914e5db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/synth_body.go @@ -0,0 +1,118 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes +// corresponding to the elements given in the values map. +// +// This is useful in situations where, for example, values provided on the +// command line can override values given in configuration, using MergeBodies. +// +// The given filename is used in case any diagnostics are returned. Since +// the created body is synthetic, it is likely that this will not be a "real" +// filename. For example, if from a command line argument it could be +// a representation of that argument's name, such as "-var=...". 
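+//
+// A minimal usage sketch (variable names are hypothetical, shown only for
+// illustration): a synthetic body holding command-line values can be paired
+// with a configuration body via hcl.MergeBodies, as described above.
+//
+//     override := SynthBody("-var=...", map[string]cty.Value{
+//         "region": cty.StringVal("us-west-2"),
+//     })
+//     merged := hcl.MergeBodies([]hcl.Body{configBody, override})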
+func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return synthBody{ + Filename: filename, + Values: values, + } +} + +type synthBody struct { + Filename string + Values map[string]cty.Value +} + +func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remain, diags := b.PartialContent(schema) + remainS := remain.(synthBody) + for name := range remainS.Values { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), + Subject: b.synthRange().Ptr(), + }) + } + return content, diags +} + +func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + MissingItemRange: b.synthRange(), + } + + remainValues := make(map[string]cty.Value) + for attrName, val := range b.Values { + remainValues[attrName] = val + } + + for _, attrS := range schema.Attributes { + delete(remainValues, attrS.Name) + val, defined := b.Values[attrS.Name] + if !defined { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.synthRange().Ptr(), + }) + } + continue + } + content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) + } + + // We just ignore blocks altogether, because this body type never has + // nested blocks. + + remain := synthBody{ + Filename: b.Filename, + Values: remainValues, + } + + return content, remain, diags +} + +func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + ret := make(hcl.Attributes) + for name, val := range b.Values { + ret[name] = b.synthAttribute(name, val) + } + return ret, nil +} + +func (b synthBody) MissingItemRange() hcl.Range { + return b.synthRange() +} + +func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { + rng := b.synthRange() + return &hcl.Attribute{ + Name: name, + Expr: &hclsyntax.LiteralValueExpr{ + Val: val, + SrcRange: rng, + }, + NameRange: rng, + Range: rng, + } +} + +func (b synthBody) synthRange() hcl.Range { + return hcl.Range{ + Filename: b.Filename, + Start: hcl.Pos{Line: 1, Column: 1}, + End: hcl.Pos{Line: 1, Column: 1}, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go new file mode 100644 index 000000000..e135546fb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/util.go @@ -0,0 +1,63 @@ +package configs + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" +) + +// exprIsNativeQuotedString determines whether the given expression looks like +// it's a quoted string in the HCL native syntax. +// +// This should be used sparingly only for situations where our legacy HCL +// decoding would've expected a keyword or reference in quotes but our new +// decoding expects the keyword or reference to be provided directly as +// an identifier-based expression. 
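+//
+// For example (illustrative only): legacy configurations often wrote
+//
+//     provider = "aws.west"
+//
+// which is a quoted template expression, whereas the current language expects
+// the bare reference
+//
+//     provider = aws.west
+//
+// This helper only sniffs for the former, quoted shape.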
+func exprIsNativeQuotedString(expr hcl.Expression) bool { + _, ok := expr.(*hclsyntax.TemplateExpr) + return ok +} + +// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is +// equivalent except that any required attributes are forced to not be required. +// +// This is useful for dealing with "override" config files, which are allowed +// to omit things that they don't wish to override from the main configuration. +// +// The returned schema may have some pointers in common with the given schema, +// so neither the given schema nor the returned schema should be modified after +// using this function in order to avoid confusion. +// +// Overrides are rarely used, so it's recommended to just create the override +// schema on the fly only when it's needed, rather than storing it in a global +// variable as we tend to do for a primary schema. +func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} + +// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that +// is equivalent except that it accepts an additional block type "dynamic" with +// a single label, used to recognize usage of the HCL dynamic block extension. +func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + + copy(ret.Blocks, schema.Blocks) + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: "dynamic", + LabelNames: []string{"type"}, + }) + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go new file mode 100644 index 000000000..c02ad4b55 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variable_type_hint.go @@ -0,0 +1,45 @@ +package configs + +// VariableTypeHint is an enumeration used for the Variable.TypeHint field, +// which is an incompletely-specified type for the variable which is used +// as a hint for whether a value provided in an ambiguous context (on the +// command line or in an environment variable) should be taken literally as a +// string or parsed as an HCL expression to produce a data structure. +// +// The type hint is applied to runtime values as well, but since it does not +// accurately describe a precise type it is not fully-sufficient to infer +// the dynamic type of a value passed through a variable. +// +// These hints use inaccurate terminology for historical reasons. Full details +// are in the documentation for each constant in this enumeration, but in +// summary: +// +// TypeHintString requires a primitive type +// TypeHintList requires a type that could be converted to a tuple +// TypeHintMap requires a type that could be converted to an object +type VariableTypeHint rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type VariableTypeHint + +// TypeHintNone indicates the absence of a type hint. Values specified in +// ambiguous contexts will be treated as literal strings, as if TypeHintString +// were selected, but no runtime value checks will be applied. 
This is a reasonable
+// type hint for a module that is never intended to be used at the top level
+// of a configuration, since descendant modules never receive values from
+// ambiguous contexts.
+const TypeHintNone VariableTypeHint = 0
+
+// TypeHintString indicates that a value provided in an ambiguous context
+// should be treated as a literal string, and additionally requires that the
+// runtime value for the variable is of a primitive type (string, number, bool).
+const TypeHintString VariableTypeHint = 'S'
+
+// TypeHintList indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of a tuple, list, or set type.
+const TypeHintList VariableTypeHint = 'L'
+
+// TypeHintMap indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of an object or map type.
+const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go
new file mode 100644
index 000000000..2b50428ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/variabletypehint_string.go
@@ -0,0 +1,39 @@
+// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
+
+package configs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[TypeHintNone-0]
+ _ = x[TypeHintString-83]
+ _ = x[TypeHintList-76]
+ _ = x[TypeHintMap-77]
+}
+
+const (
+ _VariableTypeHint_name_0 = "TypeHintNone"
+ _VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
+ _VariableTypeHint_name_2 = "TypeHintString"
+)
+
+var (
+ _VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
+)
+
+func (i VariableTypeHint) String() string {
+ switch {
+ case i == 0:
+ return _VariableTypeHint_name_0
+ case 76 <= i && i <= 77:
+ i -= 76
+ return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
+ case i == 83:
+ return _VariableTypeHint_name_2
+ default:
+ return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go
new file mode 100644
index 000000000..0f541dc71
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/configs/version_constraint.go
@@ -0,0 +1,71 @@
+package configs
+
+import (
+ "fmt"
+
+ version "github.com/hashicorp/go-version"
+ "github.com/hashicorp/hcl/v2"
+ "github.com/zclconf/go-cty/cty"
+ "github.com/zclconf/go-cty/cty/convert"
+)
+
+// VersionConstraint represents a version constraint on some resource
+// (e.g. Terraform Core, a provider, a module, ...) that carries with it
+// a source range so that a helpful diagnostic can be printed in the event
+// that a particular constraint does not match.
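+//
+// For illustration only (the attribute names are examples, not defined here):
+// such constraints typically originate from arguments like
+//
+//     required_version = ">= 0.12"
+//
+// or a "version" argument on a provider or module block, and are parsed by
+// decodeVersionConstraint below using go-version constraint syntax.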
+type VersionConstraint struct { + Required version.Constraints + DeclRange hcl.Range +} + +func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { + ret := VersionConstraint{ + DeclRange: attr.Range, + } + + val, diags := attr.Expr.Value(nil) + if diags.HasErrors() { + return ret, diags + } + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + if val.IsNull() { + // A null version constraint is strange, but we'll just treat it + // like an empty constraint set. + return ret, diags + } + + if !val.IsWhollyKnown() { + // If there is a syntax error, HCL sets the value of the given attribute + // to cty.DynamicVal. A diagnostic for the syntax error will already + // bubble up, so we will move forward gracefully here. + return ret, diags + } + + constraintStr := val.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + ret.Required = constraints + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go new file mode 100644 index 000000000..a150af961 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dag.go @@ -0,0 +1,301 @@ +package dag + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/go-multierror" +) + +// AcyclicGraph is a specialization of Graph that cannot have cycles. With +// this property, we get the property of sane graph traversal. +type AcyclicGraph struct { + Graph +} + +// WalkFunc is the callback used for walking the graph. +type WalkFunc func(Vertex) tfdiags.Diagnostics + +// DepthWalkFunc is a walk function that also receives the current depth of the +// walk as an argument +type DepthWalkFunc func(Vertex, int) error + +func (g *AcyclicGraph) DirectedGraph() Grapher { + return g +} + +// Returns a Set that includes every Vertex yielded by walking down from the +// provided starting Vertex v. +func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { + s := new(Set) + start := AsVertexList(g.DownEdges(v)) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.DepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Returns a Set that includes every Vertex yielded by walking up from the +// provided starting Vertex v. +func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { + s := new(Set) + start := AsVertexList(g.UpEdges(v)) + memoFunc := func(v Vertex, d int) error { + s.Add(v) + return nil + } + + if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Root returns the root of the DAG, or an error. 
+// +// Complexity: O(V) +func (g *AcyclicGraph) Root() (Vertex, error) { + roots := make([]Vertex, 0, 1) + for _, v := range g.Vertices() { + if g.UpEdges(v).Len() == 0 { + roots = append(roots, v) + } + } + + if len(roots) > 1 { + // TODO(mitchellh): make this error message a lot better + return nil, fmt.Errorf("multiple roots: %#v", roots) + } + + if len(roots) == 0 { + return nil, fmt.Errorf("no roots found") + } + + return roots[0], nil +} + +// TransitiveReduction performs the transitive reduction of graph g in place. +// The transitive reduction of a graph is a graph with as few edges as +// possible with the same reachability as the original graph. This means +// that if there are three nodes A => B => C, and A connects to both +// B and C, and B connects to C, then the transitive reduction is the +// same graph with only a single edge between A and B, and a single edge +// between B and C. +// +// The graph must be valid for this operation to behave properly. If +// Validate() returns an error, the behavior is undefined and the results +// will likely be unexpected. +// +// Complexity: O(V(V+E)), or asymptotically O(VE) +func (g *AcyclicGraph) TransitiveReduction() { + // For each vertex u in graph g, do a DFS starting from each vertex + // v such that the edge (u,v) exists (v is a direct descendant of u). + // + // For each v-prime reachable from v, remove the edge (u, v-prime). + defer g.debug.BeginOperation("TransitiveReduction", "").End("") + + for _, u := range g.Vertices() { + uTargets := g.DownEdges(u) + vs := AsVertexList(g.DownEdges(u)) + + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { + shared := uTargets.Intersection(g.DownEdges(v)) + for _, vPrime := range AsVertexList(shared) { + g.RemoveEdge(BasicEdge(u, vPrime)) + } + + return nil + }) + } +} + +// Validate validates the DAG. A DAG is valid if it has a single root +// with no cycles. +func (g *AcyclicGraph) Validate() error { + if _, err := g.Root(); err != nil { + return err + } + + // Look for cycles of more than 1 component + var err error + cycles := g.Cycles() + if len(cycles) > 0 { + for _, cycle := range cycles { + cycleStr := make([]string, len(cycle)) + for j, vertex := range cycle { + cycleStr[j] = VertexName(vertex) + } + + err = multierror.Append(err, fmt.Errorf( + "Cycle: %s", strings.Join(cycleStr, ", "))) + } + } + + // Look for cycles to self + for _, e := range g.Edges() { + if e.Source() == e.Target() { + err = multierror.Append(err, fmt.Errorf( + "Self reference: %s", VertexName(e.Source()))) + } + } + + return err +} + +func (g *AcyclicGraph) Cycles() [][]Vertex { + var cycles [][]Vertex + for _, cycle := range StronglyConnected(&g.Graph) { + if len(cycle) > 1 { + cycles = append(cycles, cycle) + } + } + return cycles +} + +// Walk walks the graph, calling your callback as each node is visited. +// This will walk nodes in parallel if it can. The resulting diagnostics +// contains problems from all graphs visited, in no particular order. 
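+//
+// A minimal calling sketch (the callback body is hypothetical):
+//
+//     diags := g.Walk(func(v Vertex) tfdiags.Diagnostics {
+//         // visit v and return any problems; nil means success
+//         return nil
+//     })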
+func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { + defer g.debug.BeginOperation(typeWalk, "").End("") + + w := &Walker{Callback: cb, Reverse: true} + w.Update(g) + return w.Wait() +} + +// simple convenience helper for converting a dag.Set to a []Vertex +func AsVertexList(s *Set) []Vertex { + rawList := s.List() + vertexList := make([]Vertex, len(rawList)) + for i, raw := range rawList { + vertexList[i] = raw.(Vertex) + } + return vertexList +} + +type vertexAtDepth struct { + Vertex Vertex + Depth int +} + +// depthFirstWalk does a depth-first walk of the graph starting from +// the vertices in start. +func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { + return g.depthFirstWalk(start, true, f) +} + +// This internal method provides the option of not sorting the vertices during +// the walk, which we use for the Transitive reduction. +// Some configurations can lead to fully-connected subgraphs, which makes our +// transitive reduction algorithm O(n^3). This is still passable for the size +// of our graphs, but the additional n^2 sort operations would make this +// uncomputable in a reasonable amount of time. +func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { + defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") + + seen := make(map[Vertex]struct{}) + frontier := make([]*vertexAtDepth, len(start)) + for i, v := range start { + frontier[i] = &vertexAtDepth{ + Vertex: v, + Depth: 0, + } + } + for len(frontier) > 0 { + // Pop the current vertex + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // Check if we've seen this already and return... + if _, ok := seen[current.Vertex]; ok { + continue + } + seen[current.Vertex] = struct{}{} + + // Visit the current node + if err := f(current.Vertex, current.Depth); err != nil { + return err + } + + // Visit targets of this in a consistent order. + targets := AsVertexList(g.DownEdges(current.Vertex)) + + if sorted { + sort.Sort(byVertexName(targets)) + } + + for _, t := range targets { + frontier = append(frontier, &vertexAtDepth{ + Vertex: t, + Depth: current.Depth + 1, + }) + } + } + + return nil +} + +// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from +// the vertices in start. +func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error { + defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("") + + seen := make(map[Vertex]struct{}) + frontier := make([]*vertexAtDepth, len(start)) + for i, v := range start { + frontier[i] = &vertexAtDepth{ + Vertex: v, + Depth: 0, + } + } + for len(frontier) > 0 { + // Pop the current vertex + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // Check if we've seen this already and return... + if _, ok := seen[current.Vertex]; ok { + continue + } + seen[current.Vertex] = struct{}{} + + // Add next set of targets in a consistent order. 
+ targets := AsVertexList(g.UpEdges(current.Vertex)) + sort.Sort(byVertexName(targets)) + for _, t := range targets { + frontier = append(frontier, &vertexAtDepth{ + Vertex: t, + Depth: current.Depth + 1, + }) + } + + // Visit the current node + if err := f(current.Vertex, current.Depth); err != nil { + return err + } + } + + return nil +} + +// byVertexName implements sort.Interface so a list of Vertices can be sorted +// consistently by their VertexName +type byVertexName []Vertex + +func (b byVertexName) Len() int { return len(b) } +func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byVertexName) Less(i, j int) bool { + return VertexName(b[i]) < VertexName(b[j]) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go new file mode 100644 index 000000000..65a351b6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/dot.go @@ -0,0 +1,278 @@ +package dag + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// DotOpts are the options for generating a dot formatted Graph. +type DotOpts struct { + // Allows some nodes to decide to only show themselves when the user has + // requested the "verbose" graph. + Verbose bool + + // Highlight Cycles + DrawCycles bool + + // How many levels to expand modules as we draw + MaxDepth int + + // use this to keep the cluster_ naming convention from the previous dot writer + cluster bool +} + +// GraphNodeDotter can be implemented by a node to cause it to be included +// in the dot graph. The Dot method will be called which is expected to +// return a representation of this node. +type GraphNodeDotter interface { + // Dot is called to return the dot formatting for the node. + // The first parameter is the title of the node. + // The second parameter includes user-specified options that affect the dot + // graph. See GraphDotOpts below for details. + DotNode(string, *DotOpts) *DotNode +} + +// DotNode provides a structure for Vertices to return in order to specify their +// dot format. +type DotNode struct { + Name string + Attrs map[string]string +} + +// Returns the DOT representation of this Graph. 
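+//
+// Calling note (based on the defaults set immediately below): passing a nil
+// *DotOpts requests a verbose graph with cycles drawn and no depth limit, e.g.
+//
+//     dot := g.Dot(nil) // same as &DotOpts{Verbose: true, DrawCycles: true, MaxDepth: -1}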
+func (g *marshalGraph) Dot(opts *DotOpts) []byte { + if opts == nil { + opts = &DotOpts{ + DrawCycles: true, + MaxDepth: -1, + Verbose: true, + } + } + + var w indentWriter + w.WriteString("digraph {\n") + w.Indent() + + // some dot defaults + w.WriteString(`compound = "true"` + "\n") + w.WriteString(`newrank = "true"` + "\n") + + // the top level graph is written as the first subgraph + w.WriteString(`subgraph "root" {` + "\n") + g.writeBody(opts, &w) + + // cluster isn't really used other than for naming purposes in some graphs + opts.cluster = opts.MaxDepth != 0 + maxDepth := opts.MaxDepth + if maxDepth == 0 { + maxDepth = -1 + } + + for _, s := range g.Subgraphs { + g.writeSubgraph(s, opts, maxDepth, &w) + } + + w.Unindent() + w.WriteString("}\n") + return w.Bytes() +} + +func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte { + var buf bytes.Buffer + graphName := g.Name + if graphName == "" { + graphName = "root" + } + + name := v.Name + attrs := v.Attrs + if v.graphNodeDotter != nil { + node := v.graphNodeDotter.DotNode(name, opts) + if node == nil { + return []byte{} + } + + newAttrs := make(map[string]string) + for k, v := range attrs { + newAttrs[k] = v + } + for k, v := range node.Attrs { + newAttrs[k] = v + } + + name = node.Name + attrs = newAttrs + } + + buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name)) + writeAttrs(&buf, attrs) + buf.WriteByte('\n') + + return buf.Bytes() +} + +func (e *marshalEdge) dot(g *marshalGraph) string { + var buf bytes.Buffer + graphName := g.Name + if graphName == "" { + graphName = "root" + } + + sourceName := g.vertexByID(e.Source).Name + targetName := g.vertexByID(e.Target).Name + s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName) + buf.WriteString(s) + writeAttrs(&buf, e.Attrs) + + return buf.String() +} + +func cycleDot(e *marshalEdge, g *marshalGraph) string { + return e.dot(g) + ` [color = "red", penwidth = "2.0"]` +} + +// Write the subgraph body. The is recursive, and the depth argument is used to +// record the current depth of iteration. 
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) { + if depth == 0 { + return + } + depth-- + + name := sg.Name + if opts.cluster { + // we prefix with cluster_ to match the old dot output + name = "cluster_" + name + sg.Attrs["label"] = sg.Name + } + w.WriteString(fmt.Sprintf("subgraph %q {\n", name)) + sg.writeBody(opts, w) + + for _, sg := range sg.Subgraphs { + g.writeSubgraph(sg, opts, depth, w) + } +} + +func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) { + w.Indent() + + for _, as := range attrStrings(g.Attrs) { + w.WriteString(as + "\n") + } + + // list of Vertices that aren't to be included in the dot output + skip := map[string]bool{} + + for _, v := range g.Vertices { + if v.graphNodeDotter == nil { + skip[v.ID] = true + continue + } + + w.Write(v.dot(g, opts)) + } + + var dotEdges []string + + if opts.DrawCycles { + for _, c := range g.Cycles { + if len(c) < 2 { + continue + } + + for i, j := 0, 1; i < len(c); i, j = i+1, j+1 { + if j >= len(c) { + j = 0 + } + src := c[i] + tgt := c[j] + + if skip[src.ID] || skip[tgt.ID] { + continue + } + + e := &marshalEdge{ + Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name), + Source: src.ID, + Target: tgt.ID, + Attrs: make(map[string]string), + } + + dotEdges = append(dotEdges, cycleDot(e, g)) + src = tgt + } + } + } + + for _, e := range g.Edges { + dotEdges = append(dotEdges, e.dot(g)) + } + + // srot these again to match the old output + sort.Strings(dotEdges) + + for _, e := range dotEdges { + w.WriteString(e + "\n") + } + + w.Unindent() + w.WriteString("}\n") +} + +func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { + if len(attrs) > 0 { + buf.WriteString(" [") + buf.WriteString(strings.Join(attrStrings(attrs), ", ")) + buf.WriteString("]") + } +} + +func attrStrings(attrs map[string]string) []string { + strings := make([]string, 0, len(attrs)) + for k, v := range attrs { + strings = append(strings, fmt.Sprintf("%s = %q", k, v)) + } + sort.Strings(strings) + return strings +} + +// Provide a bytes.Buffer like structure, which will indent when starting a +// newline. +type indentWriter struct { + bytes.Buffer + level int +} + +func (w *indentWriter) indent() { + newline := []byte("\n") + if !bytes.HasSuffix(w.Bytes(), newline) { + return + } + for i := 0; i < w.level; i++ { + w.Buffer.WriteString("\t") + } +} + +// Indent increases indentation by 1 +func (w *indentWriter) Indent() { w.level++ } + +// Unindent decreases indentation by 1 +func (w *indentWriter) Unindent() { w.level-- } + +// the following methods intercecpt the byte.Buffer writes and insert the +// indentation when starting a new line. +func (w *indentWriter) Write(b []byte) (int, error) { + w.indent() + return w.Buffer.Write(b) +} + +func (w *indentWriter) WriteString(s string) (int, error) { + w.indent() + return w.Buffer.WriteString(s) +} +func (w *indentWriter) WriteByte(b byte) error { + w.indent() + return w.Buffer.WriteByte(b) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go new file mode 100644 index 000000000..f0d99ee3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/edge.go @@ -0,0 +1,37 @@ +package dag + +import ( + "fmt" +) + +// Edge represents an edge in the graph, with a source and target vertex. 
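+//
+// Construction sketch (vertex names are placeholders): edges are normally
+// created with BasicEdge below and registered on a Graph via Connect:
+//
+//     g.Connect(BasicEdge(parent, child))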
+type Edge interface { + Source() Vertex + Target() Vertex + + Hashable +} + +// BasicEdge returns an Edge implementation that simply tracks the source +// and target given as-is. +func BasicEdge(source, target Vertex) Edge { + return &basicEdge{S: source, T: target} +} + +// basicEdge is a basic implementation of Edge that has the source and +// target vertex. +type basicEdge struct { + S, T Vertex +} + +func (e *basicEdge) Hashcode() interface{} { + return fmt.Sprintf("%p-%p", e.S, e.T) +} + +func (e *basicEdge) Source() Vertex { + return e.S +} + +func (e *basicEdge) Target() Vertex { + return e.T +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go new file mode 100644 index 000000000..e7517a206 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/graph.go @@ -0,0 +1,391 @@ +package dag + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "sort" +) + +// Graph is used to represent a dependency graph. +type Graph struct { + vertices *Set + edges *Set + downEdges map[interface{}]*Set + upEdges map[interface{}]*Set + + // JSON encoder for recording debug information + debug *encoder +} + +// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher. +type Subgrapher interface { + Subgraph() Grapher +} + +// A Grapher is any type that returns a Grapher, mainly used to identify +// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they +// return themselves. +type Grapher interface { + DirectedGraph() Grapher +} + +// Vertex of the graph. +type Vertex interface{} + +// NamedVertex is an optional interface that can be implemented by Vertex +// to give it a human-friendly name that is used for outputting the graph. +type NamedVertex interface { + Vertex + Name() string +} + +func (g *Graph) DirectedGraph() Grapher { + return g +} + +// Vertices returns the list of all the vertices in the graph. +func (g *Graph) Vertices() []Vertex { + list := g.vertices.List() + result := make([]Vertex, len(list)) + for i, v := range list { + result[i] = v.(Vertex) + } + + return result +} + +// Edges returns the list of all the edges in the graph. +func (g *Graph) Edges() []Edge { + list := g.edges.List() + result := make([]Edge, len(list)) + for i, v := range list { + result[i] = v.(Edge) + } + + return result +} + +// EdgesFrom returns the list of edges from the given source. +func (g *Graph) EdgesFrom(v Vertex) []Edge { + var result []Edge + from := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Source()) == from { + result = append(result, e) + } + } + + return result +} + +// EdgesTo returns the list of edges to the given target. +func (g *Graph) EdgesTo(v Vertex) []Edge { + var result []Edge + search := hashcode(v) + for _, e := range g.Edges() { + if hashcode(e.Target()) == search { + result = append(result, e) + } + } + + return result +} + +// HasVertex checks if the given Vertex is present in the graph. +func (g *Graph) HasVertex(v Vertex) bool { + return g.vertices.Include(v) +} + +// HasEdge checks if the given Edge is present in the graph. +func (g *Graph) HasEdge(e Edge) bool { + return g.edges.Include(e) +} + +// Add adds a vertex to the graph. This is safe to call multiple time with +// the same Vertex. +func (g *Graph) Add(v Vertex) Vertex { + g.init() + g.vertices.Add(v) + g.debug.Add(v) + return v +} + +// Remove removes a vertex from the graph. 
This will also remove any +// edges with this vertex as a source or target. +func (g *Graph) Remove(v Vertex) Vertex { + // Delete the vertex itself + g.vertices.Delete(v) + g.debug.Remove(v) + + // Delete the edges to non-existent things + for _, target := range g.DownEdges(v).List() { + g.RemoveEdge(BasicEdge(v, target)) + } + for _, source := range g.UpEdges(v).List() { + g.RemoveEdge(BasicEdge(source, v)) + } + + return nil +} + +// Replace replaces the original Vertex with replacement. If the original +// does not exist within the graph, then false is returned. Otherwise, true +// is returned. +func (g *Graph) Replace(original, replacement Vertex) bool { + // If we don't have the original, we can't do anything + if !g.vertices.Include(original) { + return false + } + + defer g.debug.BeginOperation("Replace", "").End("") + + // If they're the same, then don't do anything + if original == replacement { + return true + } + + // Add our new vertex, then copy all the edges + g.Add(replacement) + for _, target := range g.DownEdges(original).List() { + g.Connect(BasicEdge(replacement, target)) + } + for _, source := range g.UpEdges(original).List() { + g.Connect(BasicEdge(source, replacement)) + } + + // Remove our old vertex, which will also remove all the edges + g.Remove(original) + + return true +} + +// RemoveEdge removes an edge from the graph. +func (g *Graph) RemoveEdge(edge Edge) { + g.init() + g.debug.RemoveEdge(edge) + + // Delete the edge from the set + g.edges.Delete(edge) + + // Delete the up/down edges + if s, ok := g.downEdges[hashcode(edge.Source())]; ok { + s.Delete(edge.Target()) + } + if s, ok := g.upEdges[hashcode(edge.Target())]; ok { + s.Delete(edge.Source()) + } +} + +// DownEdges returns the outward edges from the source Vertex v. +func (g *Graph) DownEdges(v Vertex) *Set { + g.init() + return g.downEdges[hashcode(v)] +} + +// UpEdges returns the inward edges to the destination Vertex v. +func (g *Graph) UpEdges(v Vertex) *Set { + g.init() + return g.upEdges[hashcode(v)] +} + +// Connect adds an edge with the given source and target. This is safe to +// call multiple times with the same value. Note that the same value is +// verified through pointer equality of the vertices, not through the +// value of the edge itself. +func (g *Graph) Connect(edge Edge) { + g.init() + g.debug.Connect(edge) + + source := edge.Source() + target := edge.Target() + sourceCode := hashcode(source) + targetCode := hashcode(target) + + // Do we have this already? If so, don't add it again. + if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) { + return + } + + // Add the edge to the set + g.edges.Add(edge) + + // Add the down edge + s, ok := g.downEdges[sourceCode] + if !ok { + s = new(Set) + g.downEdges[sourceCode] = s + } + s.Add(target) + + // Add the up edge + s, ok = g.upEdges[targetCode] + if !ok { + s = new(Set) + g.upEdges[targetCode] = s + } + s.Add(source) +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) StringWithNodeTypes() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... 
+ for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + targetNodes := make(map[string]Vertex) + for _, target := range targets.List() { + dep := VertexName(target) + deps = append(deps, dep) + targetNodes[dep] = target + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) + } + } + + return buf.String() +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) String() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s\n", name)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + for _, target := range targets.List() { + deps = append(deps, VertexName(target)) + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s\n", d)) + } + } + + return buf.String() +} + +func (g *Graph) init() { + if g.vertices == nil { + g.vertices = new(Set) + } + if g.edges == nil { + g.edges = new(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]*Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]*Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// MarshalJSON returns a JSON representation of the entire Graph. +func (g *Graph) MarshalJSON() ([]byte, error) { + dg := newMarshalGraph("root", g) + return json.MarshalIndent(dg, "", " ") +} + +// SetDebugWriter sets the io.Writer where the Graph will record debug +// information. After this is set, the graph will immediately encode itself to +// the stream, and continue to record all subsequent operations. +func (g *Graph) SetDebugWriter(w io.Writer) { + g.debug = &encoder{w: w} + g.debug.Encode(newMarshalGraph("root", g)) +} + +// DebugVertexInfo encodes arbitrary information about a vertex in the graph +// debug logs. +func (g *Graph) DebugVertexInfo(v Vertex, info string) { + va := newVertexInfo(typeVertexInfo, v, info) + g.debug.Encode(va) +} + +// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug +// logs. +func (g *Graph) DebugEdgeInfo(e Edge, info string) { + ea := newEdgeInfo(typeEdgeInfo, e, info) + g.debug.Encode(ea) +} + +// DebugVisitInfo records a visit to a Vertex during a walk operation. +func (g *Graph) DebugVisitInfo(v Vertex, info string) { + vi := newVertexInfo(typeVisitInfo, v, info) + g.debug.Encode(vi) +} + +// DebugOperation marks the start of a set of graph transformations in +// the debug log, and returns a DebugOperationEnd func, which marks the end of +// the operation in the log. Additional information can be added to the log via +// the info parameter. 
+// +// The returned func's End method allows this method to be called from a single +// defer statement: +// defer g.DebugOperationBegin("OpName", "operating").End("") +// +// The returned function must be called to properly close the logical operation +// in the logs. +func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { + return g.debug.BeginOperation(operation, info) +} + +// VertexName returns the name of a vertex. +func VertexName(raw Vertex) string { + switch v := raw.(type) { + case NamedVertex: + return v.Name() + case fmt.Stringer: + return fmt.Sprintf("%s", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go new file mode 100644 index 000000000..7b23ea9c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/marshal.go @@ -0,0 +1,460 @@ +package dag + +import ( + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +const ( + typeOperation = "Operation" + typeTransform = "Transform" + typeWalk = "Walk" + typeDepthFirstWalk = "DepthFirstWalk" + typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" + typeTransitiveReduction = "TransitiveReduction" + typeEdgeInfo = "EdgeInfo" + typeVertexInfo = "VertexInfo" + typeVisitInfo = "VisitInfo" +) + +// the marshal* structs are for serialization of the graph data. +type marshalGraph struct { + // Type is always "Graph", for identification as a top level object in the + // JSON stream. + Type string + + // Each marshal structure requires a unique ID so that it can be referenced + // by other structures. + ID string `json:",omitempty"` + + // Human readable name for this graph. + Name string `json:",omitempty"` + + // Arbitrary attributes that can be added to the output. + Attrs map[string]string `json:",omitempty"` + + // List of graph vertices, sorted by ID. + Vertices []*marshalVertex `json:",omitempty"` + + // List of edges, sorted by Source ID. + Edges []*marshalEdge `json:",omitempty"` + + // Any number of subgraphs. A subgraph itself is considered a vertex, and + // may be referenced by either end of an edge. + Subgraphs []*marshalGraph `json:",omitempty"` + + // Any lists of vertices that are included in cycles. + Cycles [][]*marshalVertex `json:",omitempty"` +} + +// The add, remove, connect, removeEdge methods mirror the basic Graph +// manipulations to reconstruct a marshalGraph from a debug log. +func (g *marshalGraph) add(v *marshalVertex) { + g.Vertices = append(g.Vertices, v) + sort.Sort(vertices(g.Vertices)) +} + +func (g *marshalGraph) remove(v *marshalVertex) { + for i, existing := range g.Vertices { + if v.ID == existing.ID { + g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) + return + } + } +} + +func (g *marshalGraph) connect(e *marshalEdge) { + g.Edges = append(g.Edges, e) + sort.Sort(edges(g.Edges)) +} + +func (g *marshalGraph) removeEdge(e *marshalEdge) { + for i, existing := range g.Edges { + if e.Source == existing.Source && e.Target == existing.Target { + g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) + return + } + } +} + +func (g *marshalGraph) vertexByID(id string) *marshalVertex { + for _, v := range g.Vertices { + if id == v.ID { + return v + } + } + return nil +} + +type marshalVertex struct { + // Unique ID, used to reference this vertex from other structures. 
+ ID string + + // Human readable name + Name string `json:",omitempty"` + + Attrs map[string]string `json:",omitempty"` + + // This is to help transition from the old Dot interfaces. We record if the + // node was a GraphNodeDotter here, so we can call it to get attributes. + graphNodeDotter GraphNodeDotter +} + +func newMarshalVertex(v Vertex) *marshalVertex { + dn, ok := v.(GraphNodeDotter) + if !ok { + dn = nil + } + + return &marshalVertex{ + ID: marshalVertexID(v), + Name: VertexName(v), + Attrs: make(map[string]string), + graphNodeDotter: dn, + } +} + +// vertices is a sort.Interface implementation for sorting vertices by ID +type vertices []*marshalVertex + +func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name } +func (v vertices) Len() int { return len(v) } +func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] } + +type marshalEdge struct { + // Human readable name + Name string + + // Source and Target Vertices by ID + Source string + Target string + + Attrs map[string]string `json:",omitempty"` +} + +func newMarshalEdge(e Edge) *marshalEdge { + return &marshalEdge{ + Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())), + Source: marshalVertexID(e.Source()), + Target: marshalVertexID(e.Target()), + Attrs: make(map[string]string), + } +} + +// edges is a sort.Interface implementation for sorting edges by Source ID +type edges []*marshalEdge + +func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name } +func (e edges) Len() int { return len(e) } +func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } + +// build a marshalGraph structure from a *Graph +func newMarshalGraph(name string, g *Graph) *marshalGraph { + mg := &marshalGraph{ + Type: "Graph", + Name: name, + Attrs: make(map[string]string), + } + + for _, v := range g.Vertices() { + id := marshalVertexID(v) + if sg, ok := marshalSubgrapher(v); ok { + smg := newMarshalGraph(VertexName(v), sg) + smg.ID = id + mg.Subgraphs = append(mg.Subgraphs, smg) + } + + mv := newMarshalVertex(v) + mg.Vertices = append(mg.Vertices, mv) + } + + sort.Sort(vertices(mg.Vertices)) + + for _, e := range g.Edges() { + mg.Edges = append(mg.Edges, newMarshalEdge(e)) + } + + sort.Sort(edges(mg.Edges)) + + for _, c := range (&AcyclicGraph{*g}).Cycles() { + var cycle []*marshalVertex + for _, v := range c { + mv := newMarshalVertex(v) + cycle = append(cycle, mv) + } + mg.Cycles = append(mg.Cycles, cycle) + } + + return mg +} + +// Attempt to return a unique ID for any vertex. +func marshalVertexID(v Vertex) string { + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return strconv.Itoa(int(val.Pointer())) + case reflect.Interface: + return strconv.Itoa(int(val.InterfaceData()[1])) + } + + if v, ok := v.(Hashable); ok { + h := v.Hashcode() + if h, ok := h.(string); ok { + return h + } + } + + // fallback to a name, which we hope is unique. + return VertexName(v) + + // we could try harder by attempting to read the arbitrary value from the + // interface, but we shouldn't get here from terraform right now. +} + +// check for a Subgrapher, and return the underlying *Graph. 
+func marshalSubgrapher(v Vertex) (*Graph, bool) { + sg, ok := v.(Subgrapher) + if !ok { + return nil, false + } + + switch g := sg.Subgraph().DirectedGraph().(type) { + case *Graph: + return g, true + case *AcyclicGraph: + return &g.Graph, true + } + + return nil, false +} + +// The DebugOperationEnd func type provides a way to call an End function via a +// method call, allowing for the chaining of methods in a defer statement. +type DebugOperationEnd func(string) + +// End calls function e with the info parameter, marking the end of this +// operation in the logs. +func (e DebugOperationEnd) End(info string) { e(info) } + +// encoder provides methods to write debug data to an io.Writer, and is a noop +// when no writer is present +type encoder struct { + sync.Mutex + w io.Writer +} + +// Encode is analogous to json.Encoder.Encode +func (e *encoder) Encode(i interface{}) { + if e == nil || e.w == nil { + return + } + e.Lock() + defer e.Unlock() + + js, err := json.Marshal(i) + if err != nil { + log.Println("[ERROR] dag:", err) + return + } + js = append(js, '\n') + + _, err = e.w.Write(js) + if err != nil { + log.Println("[ERROR] dag:", err) + return + } +} + +func (e *encoder) Add(v Vertex) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + AddVertex: newMarshalVertex(v), + }) +} + +// Remove records the removal of Vertex v. +func (e *encoder) Remove(v Vertex) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + RemoveVertex: newMarshalVertex(v), + }) +} + +func (e *encoder) Connect(edge Edge) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + AddEdge: newMarshalEdge(edge), + }) +} + +func (e *encoder) RemoveEdge(edge Edge) { + if e == nil { + return + } + e.Encode(marshalTransform{ + Type: typeTransform, + RemoveEdge: newMarshalEdge(edge), + }) +} + +// BeginOperation marks the start of set of graph transformations, and returns +// an EndDebugOperation func to be called once the opration is complete. +func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd { + if e == nil { + return func(string) {} + } + + e.Encode(marshalOperation{ + Type: typeOperation, + Begin: op, + Info: info, + }) + + return func(info string) { + e.Encode(marshalOperation{ + Type: typeOperation, + End: op, + Info: info, + }) + } +} + +// structure for recording graph transformations +type marshalTransform struct { + // Type: "Transform" + Type string + AddEdge *marshalEdge `json:",omitempty"` + RemoveEdge *marshalEdge `json:",omitempty"` + AddVertex *marshalVertex `json:",omitempty"` + RemoveVertex *marshalVertex `json:",omitempty"` +} + +func (t marshalTransform) Transform(g *marshalGraph) { + switch { + case t.AddEdge != nil: + g.connect(t.AddEdge) + case t.RemoveEdge != nil: + g.removeEdge(t.RemoveEdge) + case t.AddVertex != nil: + g.add(t.AddVertex) + case t.RemoveVertex != nil: + g.remove(t.RemoveVertex) + } +} + +// this structure allows us to decode any object in the json stream for +// inspection, then re-decode it into a proper struct if needed. +type streamDecode struct { + Type string + Map map[string]interface{} + JSON []byte +} + +func (s *streamDecode) UnmarshalJSON(d []byte) error { + s.JSON = d + err := json.Unmarshal(d, &s.Map) + if err != nil { + return err + } + + if t, ok := s.Map["Type"]; ok { + s.Type, _ = t.(string) + } + return nil +} + +// structure for recording the beginning and end of any multi-step +// transformations. 
These are informational, and not required to reproduce the +// graph state. +type marshalOperation struct { + Type string + Begin string `json:",omitempty"` + End string `json:",omitempty"` + Info string `json:",omitempty"` +} + +// decodeGraph decodes a marshalGraph from an encoded graph stream. +func decodeGraph(r io.Reader) (*marshalGraph, error) { + dec := json.NewDecoder(r) + + // a stream should always start with a graph + g := &marshalGraph{} + + err := dec.Decode(g) + if err != nil { + return nil, err + } + + // now replay any operations that occurred on the original graph + for dec.More() { + s := &streamDecode{} + err := dec.Decode(s) + if err != nil { + return g, err + } + + // the only Type we're concerned with here is Transform to complete the + // Graph + if s.Type != typeTransform { + continue + } + + t := &marshalTransform{} + err = json.Unmarshal(s.JSON, t) + if err != nil { + return g, err + } + t.Transform(g) + } + return g, nil +} + +// marshalVertexInfo allows encoding arbitrary information about the a single +// Vertex in the logs. These are accumulated for informational display while +// rebuilding the graph. +type marshalVertexInfo struct { + Type string + Vertex *marshalVertex + Info string +} + +func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo { + return &marshalVertexInfo{ + Type: infoType, + Vertex: newMarshalVertex(v), + Info: info, + } +} + +// marshalEdgeInfo allows encoding arbitrary information about the a single +// Edge in the logs. These are accumulated for informational display while +// rebuilding the graph. +type marshalEdgeInfo struct { + Type string + Edge *marshalEdge + Info string +} + +func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo { + return &marshalEdgeInfo{ + Type: infoType, + Edge: newMarshalEdge(e), + Info: info, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go new file mode 100644 index 000000000..92b42151d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/set.go @@ -0,0 +1,123 @@ +package dag + +import ( + "sync" +) + +// Set is a set data structure. +type Set struct { + m map[interface{}]interface{} + once sync.Once +} + +// Hashable is the interface used by set to get the hash code of a value. +// If this isn't given, then the value of the item being added to the set +// itself is used as the comparison value. +type Hashable interface { + Hashcode() interface{} +} + +// hashcode returns the hashcode used for set elements. +func hashcode(v interface{}) interface{} { + if h, ok := v.(Hashable); ok { + return h.Hashcode() + } + + return v +} + +// Add adds an item to the set +func (s *Set) Add(v interface{}) { + s.once.Do(s.init) + s.m[hashcode(v)] = v +} + +// Delete removes an item from the set. +func (s *Set) Delete(v interface{}) { + s.once.Do(s.init) + delete(s.m, hashcode(v)) +} + +// Include returns true/false of whether a value is in the set. +func (s *Set) Include(v interface{}) bool { + s.once.Do(s.init) + _, ok := s.m[hashcode(v)] + return ok +} + +// Intersection computes the set intersection with other. +func (s *Set) Intersection(other *Set) *Set { + result := new(Set) + if s == nil { + return result + } + if other != nil { + for _, v := range s.m { + if other.Include(v) { + result.Add(v) + } + } + } + + return result +} + +// Difference returns a set with the elements that s has but +// other doesn't. 
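+//
+// Worked example (hypothetical elements): if s holds {1, 2, 3} and other
+// holds {2, 3, 4}, then s.Difference(other) holds only 1, while
+// s.Intersection(other) above holds 2 and 3.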
+func (s *Set) Difference(other *Set) *Set { + result := new(Set) + if s != nil { + for k, v := range s.m { + var ok bool + if other != nil { + _, ok = other.m[k] + } + if !ok { + result.Add(v) + } + } + } + + return result +} + +// Filter returns a set that contains the elements from the receiver +// where the given callback returns true. +func (s *Set) Filter(cb func(interface{}) bool) *Set { + result := new(Set) + + for _, v := range s.m { + if cb(v) { + result.Add(v) + } + } + + return result +} + +// Len is the number of items in the set. +func (s *Set) Len() int { + if s == nil { + return 0 + } + + return len(s.m) +} + +// List returns the list of set elements. +func (s *Set) List() []interface{} { + if s == nil { + return nil + } + + r := make([]interface{}, 0, len(s.m)) + for _, v := range s.m { + r = append(r, v) + } + + return r +} + +func (s *Set) init() { + s.m = make(map[interface{}]interface{}) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go new file mode 100644 index 000000000..9d8b25ce2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/tarjan.go @@ -0,0 +1,107 @@ +package dag + +// StronglyConnected returns the list of strongly connected components +// within the Graph g. This information is primarily used by this package +// for cycle detection, but strongly connected components have widespread +// use. +func StronglyConnected(g *Graph) [][]Vertex { + vs := g.Vertices() + acct := sccAcct{ + NextIndex: 1, + VertexIndex: make(map[Vertex]int, len(vs)), + } + for _, v := range vs { + // Recurse on any non-visited nodes + if acct.VertexIndex[v] == 0 { + stronglyConnected(&acct, g, v) + } + } + return acct.SCC +} + +func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int { + // Initial vertex visit + index := acct.visit(v) + minIdx := index + + for _, raw := range g.DownEdges(v).List() { + target := raw.(Vertex) + targetIdx := acct.VertexIndex[target] + + // Recurse on successor if not yet visited + if targetIdx == 0 { + minIdx = min(minIdx, stronglyConnected(acct, g, target)) + } else if acct.inStack(target) { + // Check if the vertex is in the stack + minIdx = min(minIdx, targetIdx) + } + } + + // Pop the strongly connected components off the stack if + // this is a root vertex + if index == minIdx { + var scc []Vertex + for { + v2 := acct.pop() + scc = append(scc, v2) + if v2 == v { + break + } + } + + acct.SCC = append(acct.SCC, scc) + } + + return minIdx +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// sccAcct is used ot pass around accounting information for +// the StronglyConnectedComponents algorithm +type sccAcct struct { + NextIndex int + VertexIndex map[Vertex]int + Stack []Vertex + SCC [][]Vertex +} + +// visit assigns an index and pushes a vertex onto the stack +func (s *sccAcct) visit(v Vertex) int { + idx := s.NextIndex + s.VertexIndex[v] = idx + s.NextIndex++ + s.push(v) + return idx +} + +// push adds a vertex to the stack +func (s *sccAcct) push(n Vertex) { + s.Stack = append(s.Stack, n) +} + +// pop removes a vertex from the stack +func (s *sccAcct) pop() Vertex { + n := len(s.Stack) + if n == 0 { + return nil + } + vertex := s.Stack[n-1] + s.Stack = s.Stack[:n-1] + return vertex +} + +// inStack checks if a vertex is in the stack +func (s *sccAcct) inStack(needle Vertex) bool { + for _, n := range s.Stack { + if n == needle { + return true + } + } + return false +} diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go new file mode 100644 index 000000000..5ddf8ef34 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/dag/walk.go @@ -0,0 +1,454 @@ +package dag + +import ( + "errors" + "log" + "sync" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Walker is used to walk every vertex of a graph in parallel. +// +// A vertex will only be walked when the dependencies of that vertex have +// been walked. If two vertices can be walked at the same time, they will be. +// +// Update can be called to update the graph. This can be called even during +// a walk, cahnging vertices/edges mid-walk. This should be done carefully. +// If a vertex is removed but has already been executed, the result of that +// execution (any error) is still returned by Wait. Changing or re-adding +// a vertex that has already executed has no effect. Changing edges of +// a vertex that has already executed has no effect. +// +// Non-parallelism can be enforced by introducing a lock in your callback +// function. However, the goroutine overhead of a walk will remain. +// Walker will create V*2 goroutines (one for each vertex, and dependency +// waiter for each vertex). In general this should be of no concern unless +// there are a huge number of vertices. +// +// The walk is depth first by default. This can be changed with the Reverse +// option. +// +// A single walker is only valid for one graph walk. After the walk is complete +// you must construct a new walker to walk again. State for the walk is never +// deleted in case vertices or edges are changed. +type Walker struct { + // Callback is what is called for each vertex + Callback WalkFunc + + // Reverse, if true, causes the source of an edge to depend on a target. + // When false (default), the target depends on the source. + Reverse bool + + // changeLock must be held to modify any of the fields below. Only Update + // should modify these fields. Modifying them outside of Update can cause + // serious problems. + changeLock sync.Mutex + vertices Set + edges Set + vertexMap map[Vertex]*walkerVertex + + // wait is done when all vertices have executed. It may become "undone" + // if new vertices are added. + wait sync.WaitGroup + + // diagsMap contains the diagnostics recorded so far for execution, + // and upstreamFailed contains all the vertices whose problems were + // caused by upstream failures, and thus whose diagnostics should be + // excluded from the final set. + // + // Readers and writers of either map must hold diagsLock. + diagsMap map[Vertex]tfdiags.Diagnostics + upstreamFailed map[Vertex]struct{} + diagsLock sync.Mutex +} + +type walkerVertex struct { + // These should only be set once on initialization and never written again. + // They are not protected by a lock since they don't need to be since + // they are write-once. + + // DoneCh is closed when this vertex has completed execution, regardless + // of success. + // + // CancelCh is closed when the vertex should cancel execution. If execution + // is already complete (DoneCh is closed), this has no effect. Otherwise, + // execution is cancelled as quickly as possible. + DoneCh chan struct{} + CancelCh chan struct{} + + // Dependency information. Any changes to any of these fields requires + // holding DepsLock. 
+ // + // DepsCh is sent a single value that denotes whether the upstream deps + // were successful (no errors). Any value sent means that the upstream + // dependencies are complete. No other values will ever be sent again. + // + // DepsUpdateCh is closed when there is a new DepsCh set. + DepsCh chan bool + DepsUpdateCh chan struct{} + DepsLock sync.Mutex + + // Below is not safe to read/write in parallel. This behavior is + // enforced by changes only happening in Update. Nothing else should + // ever modify these. + deps map[Vertex]chan struct{} + depsCancelCh chan struct{} +} + +// Wait waits for the completion of the walk and returns diagnostics describing +// any problems that arose. Update should be called to populate the walk with +// vertices and edges prior to calling this. +// +// Wait will return as soon as all currently known vertices are complete. +// If you plan on calling Update with more vertices in the future, you +// should not call Wait until after this is done. +func (w *Walker) Wait() tfdiags.Diagnostics { + // Wait for completion + w.wait.Wait() + + var diags tfdiags.Diagnostics + w.diagsLock.Lock() + for v, vDiags := range w.diagsMap { + if _, upstream := w.upstreamFailed[v]; upstream { + // Ignore diagnostics for nodes that had failed upstreams, since + // the downstream diagnostics are likely to be redundant. + continue + } + diags = diags.Append(vDiags) + } + w.diagsLock.Unlock() + + return diags +} + +// Update updates the currently executing walk with the given graph. +// This will perform a diff of the vertices and edges and update the walker. +// Already completed vertices remain completed (including any errors during +// their execution). +// +// This returns immediately once the walker is updated; it does not wait +// for completion of the walk. +// +// Multiple Updates can be called in parallel. Update can be called at any +// time during a walk. +func (w *Walker) Update(g *AcyclicGraph) { + log.Print("[TRACE] dag/walk: updating graph") + var v, e *Set + if g != nil { + v, e = g.vertices, g.edges + } + + // Grab the change lock so no more updates happen but also so that + // no new vertices are executed during this time since we may be + // removing them. + w.changeLock.Lock() + defer w.changeLock.Unlock() + + // Initialize fields + if w.vertexMap == nil { + w.vertexMap = make(map[Vertex]*walkerVertex) + } + + // Calculate all our sets + newEdges := e.Difference(&w.edges) + oldEdges := w.edges.Difference(e) + newVerts := v.Difference(&w.vertices) + oldVerts := w.vertices.Difference(v) + + // Add the new vertices + for _, raw := range newVerts.List() { + v := raw.(Vertex) + + // Add to the waitgroup so our walk is not done until everything finishes + w.wait.Add(1) + + // Add to our own set so we know about it already + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) + w.vertices.Add(raw) + + // Initialize the vertex info + info := &walkerVertex{ + DoneCh: make(chan struct{}), + CancelCh: make(chan struct{}), + deps: make(map[Vertex]chan struct{}), + } + + // Add it to the map and kick off the walk + w.vertexMap[v] = info + } + + // Remove the old vertices + for _, raw := range oldVerts.List() { + v := raw.(Vertex) + + // Get the vertex info so we can cancel it + info, ok := w.vertexMap[v] + if !ok { + // This vertex for some reason was never in our map. This + // shouldn't be possible. 
+ continue + } + + // Cancel the vertex + close(info.CancelCh) + + // Delete it out of the map + delete(w.vertexMap, v) + + log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) + w.vertices.Delete(raw) + } + + // Add the new edges + var changedDeps Set + for _, raw := range newEdges.List() { + edge := raw.(Edge) + waiter, dep := w.edgeParts(edge) + + // Get the info for the waiter + waiterInfo, ok := w.vertexMap[waiter] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Get the info for the dep + depInfo, ok := w.vertexMap[dep] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Add the dependency to our waiter + waiterInfo.deps[dep] = depInfo.DoneCh + + // Record that the deps changed for this waiter + changedDeps.Add(waiter) + + log.Printf( + "[TRACE] dag/walk: added edge: %q waiting on %q", + VertexName(waiter), VertexName(dep)) + w.edges.Add(raw) + } + + // Process reoved edges + for _, raw := range oldEdges.List() { + edge := raw.(Edge) + waiter, dep := w.edgeParts(edge) + + // Get the info for the waiter + waiterInfo, ok := w.vertexMap[waiter] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Delete the dependency from the waiter + delete(waiterInfo.deps, dep) + + // Record that the deps changed for this waiter + changedDeps.Add(waiter) + + log.Printf( + "[TRACE] dag/walk: removed edge: %q waiting on %q", + VertexName(waiter), VertexName(dep)) + w.edges.Delete(raw) + } + + // For each vertex with changed dependencies, we need to kick off + // a new waiter and notify the vertex of the changes. + for _, raw := range changedDeps.List() { + v := raw.(Vertex) + info, ok := w.vertexMap[v] + if !ok { + // Vertex doesn't exist... shouldn't be possible but ignore. + continue + } + + // Create a new done channel + doneCh := make(chan bool, 1) + + // Create the channel we close for cancellation + cancelCh := make(chan struct{}) + + // Build a new deps copy + deps := make(map[Vertex]<-chan struct{}) + for k, v := range info.deps { + deps[k] = v + } + + // Update the update channel + info.DepsLock.Lock() + if info.DepsUpdateCh != nil { + close(info.DepsUpdateCh) + } + info.DepsCh = doneCh + info.DepsUpdateCh = make(chan struct{}) + info.DepsLock.Unlock() + + // Cancel the older waiter + if info.depsCancelCh != nil { + close(info.depsCancelCh) + } + info.depsCancelCh = cancelCh + + log.Printf( + "[TRACE] dag/walk: dependencies changed for %q, sending new deps", + VertexName(v)) + + // Start the waiter + go w.waitDeps(v, deps, doneCh, cancelCh) + } + + // Start all the new vertices. We do this at the end so that all + // the edge waiters and changes are setup above. + for _, raw := range newVerts.List() { + v := raw.(Vertex) + go w.walkVertex(v, w.vertexMap[v]) + } +} + +// edgeParts returns the waiter and the dependency, in that order. +// The waiter is waiting on the dependency. +func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) { + if w.Reverse { + return e.Source(), e.Target() + } + + return e.Target(), e.Source() +} + +// walkVertex walks a single vertex, waiting for any dependencies before +// executing the callback. +func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { + // When we're done executing, lower the waitgroup count + defer w.wait.Done() + + // When we're done, always close our done channel + defer close(info.DoneCh) + + // Wait for our dependencies. 
We create a [closed] deps channel so + // that we can immediately fall through to load our actual DepsCh. + var depsSuccess bool + var depsUpdateCh chan struct{} + depsCh := make(chan bool, 1) + depsCh <- true + close(depsCh) + for { + select { + case <-info.CancelCh: + // Cancel + return + + case depsSuccess = <-depsCh: + // Deps complete! Mark as nil to trigger completion handling. + depsCh = nil + + case <-depsUpdateCh: + // New deps, reloop + } + + // Check if we have updated dependencies. This can happen if the + // dependencies were satisfied exactly prior to an Update occurring. + // In that case, we'd like to take into account new dependencies + // if possible. + info.DepsLock.Lock() + if info.DepsCh != nil { + depsCh = info.DepsCh + info.DepsCh = nil + } + if info.DepsUpdateCh != nil { + depsUpdateCh = info.DepsUpdateCh + } + info.DepsLock.Unlock() + + // If we still have no deps channel set, then we're done! + if depsCh == nil { + break + } + } + + // If we passed dependencies, we just want to check once more that + // we're not cancelled, since this can happen just as dependencies pass. + select { + case <-info.CancelCh: + // Cancelled during an update while dependencies completed. + return + default: + } + + // Run our callback or note that our upstream failed + var diags tfdiags.Diagnostics + var upstreamFailed bool + if depsSuccess { + log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) + diags = w.Callback(v) + } else { + log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) + // This won't be displayed to the user because we'll set upstreamFailed, + // but we need to ensure there's at least one error in here so that + // the failures will cascade downstream. + diags = diags.Append(errors.New("upstream dependencies failed")) + upstreamFailed = true + } + + // Record the result (we must do this after execution because we mustn't + // hold diagsLock while visiting a vertex.) + w.diagsLock.Lock() + if w.diagsMap == nil { + w.diagsMap = make(map[Vertex]tfdiags.Diagnostics) + } + w.diagsMap[v] = diags + if w.upstreamFailed == nil { + w.upstreamFailed = make(map[Vertex]struct{}) + } + if upstreamFailed { + w.upstreamFailed[v] = struct{}{} + } + w.diagsLock.Unlock() +} + +func (w *Walker) waitDeps( + v Vertex, + deps map[Vertex]<-chan struct{}, + doneCh chan<- bool, + cancelCh <-chan struct{}) { + + // For each dependency given to us, wait for it to complete + for dep, depCh := range deps { + DepSatisfied: + for { + select { + case <-depCh: + // Dependency satisfied! + break DepSatisfied + + case <-cancelCh: + // Wait cancelled. Note that we didn't satisfy dependencies + // so that anything waiting on us also doesn't run. + doneCh <- false + return + + case <-time.After(time.Second * 5): + log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q", + VertexName(v), VertexName(dep)) + } + } + } + + // Dependencies satisfied! 
We need to check if any errored + w.diagsLock.Lock() + defer w.diagsLock.Unlock() + for dep := range deps { + if w.diagsMap[dep].HasErrors() { + // One of our dependencies failed, so return false + doneCh <- false + return + } + } + + // All dependencies satisfied and successful + doneCh <- true +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go new file mode 100644 index 000000000..b86bd7923 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config.go @@ -0,0 +1,63 @@ +package earlyconfig + +import ( + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *tfconfig.Module + + // CallPos is the source position for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallPos tfconfig.SourcePos + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. 
+ Version *version.Version +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go new file mode 100644 index 000000000..3707f2738 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/config_build.go @@ -0,0 +1,144 @@ +package earlyconfig + +import ( + "fmt" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := map[string]*Config{} + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + var vc version.Constraints + if strings.TrimSpace(call.Version) != "" { + var err error + vc, err = version.NewConstraint(call.Version) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), + })) + continue + } + } + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.Source, + VersionConstraints: vc, + Parent: parent, + CallPos: call.Pos, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. + continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallPos: call.Pos, + SourceAddr: call.Source, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = diags.Append(modDiags) + + ret[call.Name] = child + } + + return ret, diags +} + +// ModuleRequest is used as part of the ModuleWalker interface used with +// function BuildConfig. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. 
+ Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // VersionConstraint is the version constraint applied to the module in + // configuration. + VersionConstraints version.Constraints + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source position for the header of the "module" block + // in configuration that prompted this request. + CallPos tfconfig.SourcePos +} + +// ModuleWalker is an interface used with BuildConfig. +type ModuleWalker interface { + LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) + +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + return f(req) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go new file mode 100644 index 000000000..b2e1807eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/diagnostics.go @@ -0,0 +1,78 @@ +package earlyconfig + +import ( + "fmt" + + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { + ret := make(tfdiags.Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = wrapDiagnostic(diag) + } + return ret +} + +func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { + return wrappedDiagnostic{ + d: diag, + } +} + +type wrappedDiagnostic struct { + d tfconfig.Diagnostic +} + +func (d wrappedDiagnostic) Severity() tfdiags.Severity { + switch d.d.Severity { + case tfconfig.DiagError: + return tfdiags.Error + case tfconfig.DiagWarning: + return tfdiags.Warning + default: + // Should never happen since there are no other severities + return 0 + } +} + +func (d wrappedDiagnostic) Description() tfdiags.Description { + // Since the inspect library doesn't produce precise source locations, + // we include the position information as part of the error message text. + // See the comment inside method "Source" for more information. 
+ switch { + case d.d.Pos == nil: + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: d.d.Detail, + } + case d.d.Detail != "": + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), + } + default: + return tfdiags.Description{ + Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), + } + } +} + +func (d wrappedDiagnostic) Source() tfdiags.Source { + // Since the inspect library is constrained by the lowest common denominator + // between legacy HCL and modern HCL, it only returns ranges at whole-line + // granularity, and that isn't sufficient to populate a tfdiags.Source + // and so we'll just omit ranges altogether and include the line number in + // the Description text. + // + // Callers that want to return nicer errors should consider reacting to + // earlyconfig errors by attempting a follow-up parse with the normal + // config loader, which can produce more precise source location + // information. + return tfdiags.Source{} +} + +func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go new file mode 100644 index 000000000..a9cf10f37 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/doc.go @@ -0,0 +1,20 @@ +// Package earlyconfig is a specialized alternative to the top-level "configs" +// package that does only shallow processing of configuration and is therefore +// able to be much more liberal than the full config loader in what it accepts. +// +// In particular, it can accept both current and legacy HCL syntax, and it +// ignores top-level blocks that it doesn't recognize. These two characteristics +// make this package ideal for dependency-checking use-cases so that we are +// more likely to be able to return an error message about an explicit +// incompatibility than to return a less-actionable message about a construct +// not being supported. +// +// However, its liberal approach also means it should be used sparingly. It +// exists primarily for "terraform init", so that it is able to detect +// incompatibilities more robustly when installing dependencies. For most +// other use-cases, use the "configs" and "configs/configload" packages. +// +// Package earlyconfig is a wrapper around the terraform-config-inspect +// codebase, adding to it just some helper functionality for Terraform's own +// use-cases. +package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go new file mode 100644 index 000000000..11eff2eb6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig/module.go @@ -0,0 +1,13 @@ +package earlyconfig + +import ( + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// LoadModule loads some top-level metadata for the module in the given +// directory. 
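A usage sketch for LoadModule (hypothetical caller code; the directory is made up, the package is internal to the SDK, and "log" plus the earlyconfig import path are assumed):

func inspectModule(dir string) {
    mod, diags := earlyconfig.LoadModule(dir)
    if diags.HasErrors() {
        log.Fatalf("invalid module in %s: %s", dir, diags.Err())
    }
    // ModuleCalls come straight from terraform-config-inspect's shallow parse.
    for name, call := range mod.ModuleCalls {
        log.Printf("module %q is sourced from %q", name, call.Source)
    }
}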
+func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { + mod, diags := tfconfig.LoadModule(dir) + return mod, wrapDiagnostics(diags) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go new file mode 100644 index 000000000..1bb7b9f2f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/expand.go @@ -0,0 +1,152 @@ +package flatmap + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" +) + +// Expand takes a map and a key (prefix) and expands that value into +// a more complex structure. This is the reverse of the Flatten operation. +func Expand(m map[string]string, key string) interface{} { + // If the key is exactly a key in the map, just return it + if v, ok := m[key]; ok { + if v == "true" { + return true + } else if v == "false" { + return false + } + + return v + } + + // Check if the key is an array, and if so, expand the array + if v, ok := m[key+".#"]; ok { + // If the count of the key is unknown, then just put the unknown + // value in the value itself. This will be detected by Terraform + // core later. + if v == hcl2shim.UnknownVariableValue { + return v + } + + return expandArray(m, key) + } + + // Check if this is a prefix in the map + prefix := key + "." + for k := range m { + if strings.HasPrefix(k, prefix) { + return expandMap(m, prefix) + } + } + + return nil +} + +func expandArray(m map[string]string, prefix string) []interface{} { + num, err := strconv.ParseInt(m[prefix+".#"], 0, 0) + if err != nil { + panic(err) + } + + // If the number of elements in this array is 0, then return an + // empty slice as there is nothing to expand. Trying to expand it + // anyway could lead to crashes as any child maps, arrays or sets + // that no longer exist are still shown as empty with a count of 0. + if num == 0 { + return []interface{}{} + } + + // NOTE: "num" is not necessarily accurate, e.g. if a user tampers + // with state, so the following code should not crash when given a + // number of items more or less than what's given in num. The + // num key is mainly just a hint that this is a list or set. + + // The Schema "Set" type stores its values in an array format, but + // using numeric hash values instead of ordinal keys. Take the set + // of keys regardless of value, and expand them in numeric order. + // See GH-11042 for more details. 
+ keySet := map[int]bool{} + computed := map[string]bool{} + for k := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + + key := k[len(prefix)+1:] + idx := strings.Index(key, ".") + if idx != -1 { + key = key[:idx] + } + + // skip the count value + if key == "#" { + continue + } + + // strip the computed flag if there is one + if strings.HasPrefix(key, "~") { + key = key[1:] + computed[key] = true + } + + k, err := strconv.Atoi(key) + if err != nil { + panic(err) + } + keySet[int(k)] = true + } + + keysList := make([]int, 0, num) + for key := range keySet { + keysList = append(keysList, key) + } + sort.Ints(keysList) + + result := make([]interface{}, len(keysList)) + for i, key := range keysList { + keyString := strconv.Itoa(key) + if computed[keyString] { + keyString = "~" + keyString + } + result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString)) + } + + return result +} + +func expandMap(m map[string]string, prefix string) map[string]interface{} { + // Submaps may not have a '%' key, so we can't count on this value being + // here. If we don't have a count, just proceed as if we have have a map. + if count, ok := m[prefix+"%"]; ok && count == "0" { + return map[string]interface{}{} + } + + result := make(map[string]interface{}) + for k := range m { + if !strings.HasPrefix(k, prefix) { + continue + } + + key := k[len(prefix):] + idx := strings.Index(key, ".") + if idx != -1 { + key = key[:idx] + } + if _, ok := result[key]; ok { + continue + } + + // skip the map count value + if key == "%" { + continue + } + + result[key] = Expand(m, k[:len(prefix)+len(key)]) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go new file mode 100644 index 000000000..9ff6e4265 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/flatten.go @@ -0,0 +1,71 @@ +package flatmap + +import ( + "fmt" + "reflect" +) + +// Flatten takes a structure and turns into a flat map[string]string. +// +// Within the "thing" parameter, only primitive values are allowed. Structs are +// not supported. Therefore, it can only be slices, maps, primitives, and +// any combination of those together. +// +// See the tests for examples of what inputs are turned into. 
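As a concrete illustration of the flattening convention implemented below (example values only; lists get a ".#" count key plus numeric indexes, and Expand in expand.go reverses the operation):

func exampleFlatten() flatmap.Map {
    return flatmap.Flatten(map[string]interface{}{
        "name": "web",
        "tags": []interface{}{"a", "b"},
    })
    // Result:
    //   "name"   => "web"
    //   "tags.#" => "2"  (element count)
    //   "tags.0" => "a"
    //   "tags.1" => "b"
}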
+func Flatten(thing map[string]interface{}) Map { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return Map(result) +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." + + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go new file mode 100644 index 000000000..46b72c401 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/flatmap/map.go @@ -0,0 +1,82 @@ +package flatmap + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. 
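A sketch of the wholesale-replacement behaviour described above, using made-up keys:

func exampleMerge() flatmap.Map {
    m := flatmap.Map{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
    m2 := flatmap.Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"}

    m.Merge(m2)
    // m is now {"foo.#": "2", "foo.0": "x", "foo.1": "y"}; the stale
    // "foo.2" entry was deleted rather than left behind.
    return m
}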
+func (m Map) Merge(m2 Map) { + for _, prefix := range m2.Keys() { + m.Delete(prefix) + + for k, v := range m2 { + if strings.HasPrefix(k, prefix) { + m[k] = v + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go new file mode 100644 index 000000000..35a3e7a49 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/config/validator.go @@ -0,0 +1,214 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/flatmap" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// Validator is a helper that helps you validate the configuration +// of your resource, resource provider, etc. +// +// At the most basic level, set the Required and Optional lists to be +// specifiers of keys that are required or optional. If a key shows up +// that isn't in one of these two lists, then an error is generated. +// +// The "specifiers" allowed in this is a fairly rich syntax to help +// describe the format of your configuration: +// +// * Basic keys are just strings. For example: "foo" will match the +// "foo" key. +// +// * Nested structure keys can be matched by doing +// "listener.*.foo". This will verify that there is at least one +// listener element that has the "foo" key set. +// +// * The existence of a nested structure can be checked by simply +// doing "listener.*" which will verify that there is at least +// one element in the "listener" structure. This is NOT +// validating that "listener" is an array. It is validating +// that it is a nested structure in the configuration. +// +type Validator struct { + Required []string + Optional []string +} + +func (v *Validator) Validate( + c *terraform.ResourceConfig) (ws []string, es []error) { + // Flatten the configuration so it is easier to reason about + flat := flatmap.Flatten(c.Raw) + + keySet := make(map[string]validatorKey) + for i, vs := range [][]string{v.Required, v.Optional} { + req := i == 0 + for _, k := range vs { + vk, err := newValidatorKey(k, req) + if err != nil { + es = append(es, err) + continue + } + + keySet[k] = vk + } + } + + purged := make([]string, 0) + for _, kv := range keySet { + p, w, e := kv.Validate(flat) + if len(w) > 0 { + ws = append(ws, w...) + } + if len(e) > 0 { + es = append(es, e...) + } + + purged = append(purged, p...) + } + + // Delete all the keys we processed in order to find + // the unknown keys. + for _, p := range purged { + delete(flat, p) + } + + // The rest are unknown + for k, _ := range flat { + es = append(es, fmt.Errorf("Unknown configuration: %s", k)) + } + + return +} + +type validatorKey interface { + // Validate validates the given configuration and returns viewed keys, + // warnings, and errors. 
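A usage sketch for the Validator described above (hypothetical caller code; "name", "listener.*.port" and "tags" are example specifiers, and the config and terraform package imports are assumed):

func validateExample(c *terraform.ResourceConfig) ([]string, []error) {
    v := &config.Validator{
        Required: []string{"name", "listener.*.port"},
        Optional: []string{"tags"},
    }
    // Returns warnings and errors, including "Unknown configuration: ..."
    // for any key not covered by the specifiers.
    return v.Validate(c)
}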
+ Validate(map[string]string) ([]string, []string, []error) +} + +func newValidatorKey(k string, req bool) (validatorKey, error) { + var result validatorKey + + parts := strings.Split(k, ".") + if len(parts) > 1 && parts[1] == "*" { + result = &nestedValidatorKey{ + Parts: parts, + Required: req, + } + } else { + result = &basicValidatorKey{ + Key: k, + Required: req, + } + } + + return result, nil +} + +// basicValidatorKey validates keys that are basic such as "foo" +type basicValidatorKey struct { + Key string + Required bool +} + +func (v *basicValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + for k, _ := range m { + // If we have the exact key its a match + if k == v.Key { + return []string{k}, nil, nil + } + } + + if !v.Required { + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", v.Key)} +} + +type nestedValidatorKey struct { + Parts []string + Required bool +} + +func (v *nestedValidatorKey) validate( + m map[string]string, + prefix string, + offset int) ([]string, []string, []error) { + if offset >= len(v.Parts) { + // We're at the end. Look for a specific key. + v2 := &basicValidatorKey{Key: prefix, Required: v.Required} + return v2.Validate(m) + } + + current := v.Parts[offset] + + // If we're at offset 0, special case to start at the next one. + if offset == 0 { + return v.validate(m, current, offset+1) + } + + // Determine if we're doing a "for all" or a specific key + if current != "*" { + // We're looking at a specific key, continue on. + return v.validate(m, prefix+"."+current, offset+1) + } + + // We're doing a "for all", so we loop over. + countStr, ok := m[prefix+".#"] + if !ok { + if !v.Required { + // It wasn't required, so its no problem. + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", prefix)} + } + + count, err := strconv.ParseInt(countStr, 0, 0) + if err != nil { + // This shouldn't happen if flatmap works properly + panic("invalid flatmap array") + } + + var e []error + var w []string + u := make([]string, 1, count+1) + u[0] = prefix + ".#" + for i := 0; i < int(count); i++ { + prefix := fmt.Sprintf("%s.%d", prefix, i) + + // Mark that we saw this specific key + u = append(u, prefix) + + // Mark all prefixes of this + for k, _ := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + u = append(u, k) + } + + // If we have more parts, then validate deeper + if offset+1 < len(v.Parts) { + u2, w2, e2 := v.validate(m, prefix, offset+1) + + u = append(u, u2...) + w = append(w, w2...) + e = append(e, e2...) + } + } + + return u, w, e +} + +func (v *nestedValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + return v.validate(m, "", 0) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go new file mode 100644 index 000000000..54899bc65 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean/name_suggestion.go @@ -0,0 +1,24 @@ +package didyoumean + +import ( + "github.com/agext/levenshtein" +) + +// NameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. 
+// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func NameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go new file mode 100644 index 000000000..82b5937bf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/doc.go @@ -0,0 +1,6 @@ +// Package plugin contains types and functions to help Terraform plugins +// implement the plugin rpc interface. +// The primary Provider type will be responsible for converting from the grpc +// wire protocol to the types and methods known to the provider +// implementations. +package plugin diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go new file mode 100644 index 000000000..388f1ed59 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/grpc_provider.go @@ -0,0 +1,1398 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "log" + "strconv" + + "github.com/zclconf/go-cty/cty" + ctyconvert "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/msgpack" + context "golang.org/x/net/context" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +const newExtraKey = "_new_extra_shim" + +// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a +// proto.ProviderServer implementation. If the provided provider is not a +// *schema.Provider, this will return nil, +func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { + sp, ok := p.(*schema.Provider) + if !ok { + return nil + } + + return &GRPCProviderServer{ + provider: sp, + } +} + +// GRPCProviderServer handles the server, or plugin side of the rpc connection. 
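A sketch of how a plugin binary might obtain this server (hypothetical in-package helper, not part of the vendored file; NewGRPCProviderServerShim returns nil when the supplied value is not a *schema.Provider, as noted above):

func newServer(p terraform.ResourceProvider) *GRPCProviderServer {
    srv := NewGRPCProviderServerShim(p)
    if srv == nil {
        log.Fatal("provider does not use helper/schema; cannot wrap it for the gRPC protocol")
    }
    return srv
}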
+type GRPCProviderServer struct { + provider *schema.Provider +} + +func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { + // Here we are certain that the provider is being called through grpc, so + // make sure the feature flag for helper/schema is set + schema.SetProto5() + + resp := &proto.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*proto.Schema), + DataSourceSchemas: make(map[string]*proto.Schema), + } + + resp.Provider = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + } + + for typ, res := range s.provider.ResourcesMap { + resp.ResourceSchemas[typ] = &proto.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + resp.DataSourceSchemas[typ] = &proto.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { + return schema.InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { + resp := &proto.PrepareProviderConfig_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
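For context on the cty.Transform call that follows, a standalone sketch (assumed helper, not part of the vendored file) of how a path-based transform targets only top-level null attributes; the callback sees every value together with its path, and len(path) == 1 identifies a direct attribute of the object:

func fillTopLevelStringDefaults(obj cty.Value, def string) cty.Value {
    out, err := cty.Transform(obj, func(p cty.Path, v cty.Value) (cty.Value, error) {
        if len(p) == 1 && v.IsNull() && v.Type() == cty.String {
            return cty.StringVal(def), nil
        }
        return v, nil
    })
    if err != nil {
        // the callback above never returns an error
        return obj
    }
    return out
}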
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.Validate(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { + resp := &proto.ValidateResourceTypeConfig_Response{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateResource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { + resp := &proto.ValidateDataSourceConfig_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateDataSource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { + resp := &proto.UpgradeResourceState_Response{} + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. + case len(req.RawState.Flatmap) > 0: + jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. 
+ case len(req.RawState.Json) > 0: + err = json.Unmarshal(req.RawState.Json, &jsonMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + default: + log.Println("[DEBUG] no state provided to upgrade") + return resp, nil + } + + // complete the upgrade of the JSON states + jsonMap, err = s.upgradeJSONState(version, jsonMap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // The provider isn't required to clean out removed fields + s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) + + // now we need to turn the state into the default json representation, so + // that it can be re-decoded using the actual schema. + val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to make sure blocks are represented correctly, which means + // that missing blocks are empty collections, rather than null. + // First we need to CoerceValue to ensure that all object types match. + val, err = schemaBlock.CoerceValue(val) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // Normalize the value and fill in any missing blocks. + val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) + + // encode the final state to the expected msgpack format + newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP} + return resp, nil +} + +// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate +// state if necessary, and converts it to the new JSON state format decoded as a +// map[string]interface{}. +// upgradeFlatmapState returns the json map along with the corresponding schema +// version. +func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { + // this will be the version we've upgraded so, defaulting to the given + // version in case no migration was called. + upgradedVersion := version + + // first determine if we need to call the legacy MigrateState func + requiresMigrate := version < res.SchemaVersion + + schemaType := res.CoreConfigSchema().ImpliedType() + + // if there are any StateUpgraders, then we need to only compare + // against the first version there + if len(res.StateUpgraders) > 0 { + requiresMigrate = version < res.StateUpgraders[0].Version + } + + if requiresMigrate && res.MigrateState == nil { + // Providers were previously allowed to bump the version + // without declaring MigrateState. + // If there are further upgraders, then we've only updated that far. 
+ if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else if requiresMigrate { + is := &terraform.InstanceState{ + ID: m["id"], + Attributes: m, + Meta: map[string]interface{}{ + "schema_version": strconv.Itoa(version), + }, + } + + is, err := res.MigrateState(version, is, s.provider.Meta()) + if err != nil { + return nil, 0, err + } + + // re-assign the map in case there was a copy made, making sure to keep + // the ID + m := is.Attributes + m["id"] = is.ID + + // if there are further upgraders, then we've only updated that far + if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else { + // the schema version may be newer than the MigrateState functions + // handled and older than the current, but still stored in the flatmap + // form. If that's the case, we need to find the correct schema type to + // convert the state. + for _, upgrader := range res.StateUpgraders { + if upgrader.Version == version { + schemaType = upgrader.Type + break + } + } + } + + // now we know the state is up to the latest version that handled the + // flatmap format state. Now we can upgrade the format and continue from + // there. + newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) + if err != nil { + return nil, 0, err + } + + jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType) + return jsonMap, upgradedVersion, err +} + +func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { + var err error + + for _, upgrader := range res.StateUpgraders { + if version != upgrader.Version { + continue + } + + m, err = upgrader.Upgrade(m, s.provider.Meta()) + if err != nil { + return nil, err + } + version++ + } + + return m, nil +} + +// Remove any attributes no longer present in the schema, so that the json can +// be correctly decoded. +func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { + // we're only concerned with finding maps that corespond to object + // attributes + switch v := v.(type) { + case []interface{}: + // If these aren't blocks the next call will be a noop + if ty.IsListType() || ty.IsSetType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + } + return + case map[string]interface{}: + // map blocks aren't yet supported, but handle this just in case + if ty.IsMapType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + return + } + + if ty == cty.DynamicPseudoType { + log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) + return + } + + if !ty.IsObjectType() { + // This shouldn't happen, and will fail to decode further on, so + // there's no need to handle it here. 
+ log.Printf("[WARN] unexpected type %#v for map in json state", ty) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + delete(v, attr) + continue + } + + s.removeAttributes(attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} + +func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { + resp := &proto.Configure_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + err = s.provider.Configure(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { + resp := &proto.ReadResource_Response{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state. + Private: req.Private, + } + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. 
+ newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + resp := &proto.PlanResourceChange_Response{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // turn the proposed state into a legacy configuration + cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) + + diff, err := s.provider.SimpleDiff(info, priorState, cfg) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // if this is a new instance, we need to make sure ID is going to be computed + if create { + if diff == nil { + diff = terraform.NewInstanceDiff() + } + + diff.Attributes["id"] = &terraform.ResourceAttrDiff{ + NewComputed: true, + } + } + + if diff == nil || len(diff.Attributes) == 0 { + // schema.Provider.Diff returns nil if it ends up making a diff with no + // changes, but our new interface wants us to return an actual change + // description that _shows_ there are no changes. This is always the + // prior state, because we force a diff above if this is a new instance. + resp.PlannedState = req.PriorState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + if priorState == nil { + priorState = &terraform.InstanceState{} + } + + // now we need to apply the diff to the prior state, so get the planned state + plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) + + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) + + // The old SDK code has some imprecisions that cause it to sometimes + // generate differences that the SDK itself does not consider significant + // but Terraform Core would. To avoid producing weird do-nothing diffs + // in that case, we'll check if the provider as produced something we + // think is "equivalent" to the prior state and just return the prior state + // itself if so, thus ensuring that Terraform Core will treat this as + // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its + // accuracy. 
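	// Illustrative example (editorial note, not part of the vendored source):
	// if the only difference the provider produced is a string attribute that
	// was null in the prior state and "" in the planned value, then
	// hcl2shim.ValuesSDKEquivalent treats the two values as equivalent, the
	// prior state is carried forward as the planned state below, and Terraform
	// Core reports a no-op plan.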
+ forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &proto.DynamicValue{ + Msgpack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &schema.ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. + var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". 
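	// Illustrative note (editorial, not part of the vendored source): a
	// flatmap attribute path collected above, such as the hypothetical
	// "network_interface.0.subnet_id", is converted by hcl2shim.RequiresReplace
	// into a cty.Path along the lines of
	//
	//	cty.Path{
	//		cty.GetAttrStep{Name: "network_interface"},
	//		cty.IndexStep{Key: cty.NumberIntVal(0)},
	//		cty.GetAttrStep{Name: "subnet_id"},
	//	}
	//
	// which pathToAttributePath then encodes as protocol AttributePath steps.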
+ id := plannedStateVal.GetAttr("id") + if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { + requiresNew = append(requiresNew, "id") + } + + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // convert these to the protocol structures + for _, p := range requiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { + resp := &proto.ApplyResourceChange_Response{ + // Start with the existing state as a fallback + NewState: req.PriorState, + } + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.PlannedPrivate) > 0 { + if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + var diff *terraform.InstanceDiff + destroy := false + + // a null state means we are destroying the instance + if plannedStateVal.IsNull() { + destroy = true + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + Destroy: true, + } + } else { + diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res)) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + if diff == nil { + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + } + } + + // add NewExtra Fields that may have been stored in the private data + if newExtra := private[newExtraKey]; newExtra != nil { + for k, v := range newExtra.(map[string]interface{}) { + d := diff.Attributes[k] + + if d == nil { + d = &terraform.ResourceAttrDiff{} + } + + d.NewExtra = v + diff.Attributes[k] = d + } + } + + if private != nil { + diff.Meta = private + } + + for k, d := range diff.Attributes { + // We need to turn off any RequiresNew. There could be attributes + // without changes in here inserted by helper/schema, but if they have + // RequiresNew then the state will be dropped from the ResourceData. + d.RequiresNew = false + + // Check that any "removed" attributes that don't actually exist in the + // prior state, or helper/schema will confuse itself + if d.NewRemoved { + if _, ok := priorState.Attributes[k]; !ok { + delete(diff.Attributes, k) + } + } + } + + newInstanceState, err := s.provider.Apply(info, priorState, diff) + // we record the error here, but continue processing any returned state. 
+ if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { + resp := &proto.ImportResourceState_Response{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(info, req.Id) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. 
+ newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &proto.ImportResourceState_ImportedResource{ + TypeName: resourceType, + State: &proto.DynamicValue{ + Msgpack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { + resp := &proto.ReadDataSource_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + diff, err := s.provider.ReadDataDiff(info, config) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, err := s.provider.ReadDataApply(info, diff) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *proto.AttributePath { + var steps []*proto.AttributePath_Step + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: i, + }, + }) + case cty.String: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: s.Key.AsString(), + }, + }) + } + } + } + + return &proto.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. 
we need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map. + timeouts, ok := toAttrs[schema.TimeoutsConfigKey] + if ok { + toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[schema.TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *schema.Resource) *schema.Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(schema.Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*schema.Schema{} + + for k, s := range r.Schema { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *schema.Schema) *schema.Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(schema.Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *schema.Schema: + newSchema.Elem = stripSchema(e) + case *schema.Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepency between src and dst value being null or empty, +// prefer the src value. This takes a little more liberty with set types, since +// we can't correlate modified set values. In the case of sets, if the src set +// was wholly known we assume the value was correctly applied and copy that +// entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This however means that a CustomizeDiffFunction may not +// be able to change a null to an empty value or vice versa, but that should be +// very uncommon nor was it reliable before 0.12 either. +func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { + ty := dst.Type() + if !src.IsNull() && !src.IsKnown() { + // Return src during plan to retain unknown interpolated placeholders, + // which could be lost if we're only updating a resource. If this is a + // read scenario, then there shouldn't be any unknowns at all. 
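	// Worked example (editorial, not part of the vendored source): during
	// apply, if src (the planned value) holds an empty list for an attribute
	// while dst (the value the provider returned) holds null, the empty list
	// from src is preferred so the final state stays consistent with the plan;
	// during plan the roles flip, and dst is preferred whenever unknowns or
	// sets are involved.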
+ if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. 
+ srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { + var diags []*proto.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. + p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go new file mode 100644 index 000000000..a22a264fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin/unknown.go @@ -0,0 +1,131 @@ +package plugin + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. 
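	// Illustrative example (editorial, not part of the vendored source): for a
	// schema with attributes {"id": Computed, "name": Optional} and
	// val = object({id: null, name: "web"}), the result is an object with "id"
	// set to an unknown value and "name" left as "web"; a wholly null object
	// whose schema has no computed attributes is returned unchanged.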
+ if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. + switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. 
+ for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go new file mode 100644 index 000000000..ad8d626c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/httpclient/client.go @@ -0,0 +1,53 @@ +package httpclient + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" +const userAgentFormat = "Terraform/%s" + +// New returns the DefaultPooledClient from the cleanhttp +// package that will also send a Terraform User-Agent string. +func New() *http.Client { + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: UserAgentString(), + inner: cli.Transport, + } + return cli +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + log.Printf("[TRACE] HTTP client %s request to %s", req.Method, req.URL.String()) + return rt.inner.RoundTrip(req) +} + +func UserAgentString() string { + ua := fmt.Sprintf(userAgentFormat, version.Version) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go new file mode 100644 index 000000000..7096ff74f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/copy_dir.go @@ -0,0 +1,125 @@ +package initwd + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. 
+ return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// sameFile tried to determine if to paths are the same file. +// If the paths don't match, we lookup the inode on supported systems. +func sameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aIno, err := inode(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bIno, err := inode(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if aIno > 0 && aIno == bIno { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go new file mode 100644 index 000000000..b9d938dbb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/doc.go @@ -0,0 +1,7 @@ +// Package initwd contains various helper functions used by the "terraform init" +// command to initialize a working directory. +// +// These functions may also be used from testing code to simulate the behaviors +// of "terraform init" against test fixtures, but should not be used elsewhere +// in the main code. +package initwd diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go new file mode 100644 index 000000000..641e71dec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go @@ -0,0 +1,363 @@ +package initwd + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +const initFromModuleRootCallName = "root" +const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." + +// DirFromModule populates the given directory (which must exist and be +// empty) with the contents of the module at the given source address. +// +// It does this by installing the given module and all of its descendent +// modules in a temporary root directory and then copying the installed +// files into suitable locations. As a consequence, any diagnostics it +// generates will reveal the location of this temporary directory to the +// user. +// +// This rather roundabout installation approach is taken to ensure that +// installation proceeds in a manner identical to normal module installation. 
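// Illustrative example (editorial, not part of the upstream comment): running
// "terraform init -from-module=git::https://example.com/network.git//modules/vpc"
// (a hypothetical address) installs the vpc subdirectory and its descendents
// into rootDir; a module call inside that tree whose source is "../shared" can
// no longer be resolved from the copy, and the diagnostics described below
// suggest rewriting it as "git::https://example.com/network.git//modules/shared".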
+// +// If the given source address specifies a sub-directory of the given +// package then only the sub-directory and its descendents will be copied +// into the given root directory, which will cause any relative module +// references using ../ from that module to be unresolvable. Error diagnostics +// are produced in that case, to prompt the user to rewrite the source strings +// to be absolute references to the original remote module. +func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // The way this function works is pretty ugly, but we accept it because + // -from-module is a less important case than normal module installation + // and so it's better to keep this ugly complexity out here rather than + // adding even more complexity to the normal module installer. + + // The target directory must exist but be empty. + { + entries, err := ioutil.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target directory does not exist", + fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read target directory", + fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), + )) + } + return diags + } + haveEntries := false + for _, entry := range entries { + if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { + continue + } + haveEntries = true + } + if haveEntries { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't populate non-empty directory", + fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), + )) + return diags + } + } + + instDir := filepath.Join(rootDir, ".terraform/init-from-module") + inst := NewModuleInstaller(instDir, reg) + log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) + os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too + err := os.MkdirAll(instDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create temporary directory", + fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), + )) + return diags + } + + instManifest := make(modsdir.Manifest) + retManifest := make(modsdir.Manifest) + + fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) + fakePos := tfconfig.SourcePos{ + Filename: fakeFilename, + Line: 1, + } + + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddr) + if maybePath == "." 
|| strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddr = filepath.Join(wd, sourceAddr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) + } + } + } + + // Now we need to create an artificial root module that will seed our + // installation process. + fakeRootModule := &tfconfig.Module{ + ModuleCalls: map[string]*tfconfig.ModuleCall{ + initFromModuleRootCallName: { + Name: initFromModuleRootCallName, + Source: sourceAddr, + Pos: fakePos, + }, + }, + } + + // wrapHooks filters hook notifications to only include Download calls + // and to trim off the initFromModuleRootCallName prefix. We'll produce + // our own Install notifications directly below. + wrapHooks := installHooksInitDir{ + Wrapped: hooks, + } + getter := reusingGetter{} + _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) + diags = append(diags, instDiags...) + if instDiags.HasErrors() { + return diags + } + + // If all of that succeeded then we'll now migrate what was installed + // into the final directory structure. + err = os.MkdirAll(modulesDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local modules directory", + fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), + )) + return diags + } + + recordKeys := make([]string, 0, len(instManifest)) + for k := range instManifest { + recordKeys = append(recordKeys, k) + } + sort.Strings(recordKeys) + + for _, recordKey := range recordKeys { + record := instManifest[recordKey] + + if record.Key == initFromModuleRootCallName { + // We've found the module the user requested, which we must + // now copy into rootDir so it can be used directly. + log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) + err := copyDir(rootDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy root module", + fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), + )) + continue + } + + // We'll try to load the newly-copied module here just so we can + // sniff for any module calls that ../ out of the root directory + // and must thus be rewritten to be absolute addresses again. + // For now we can't do this rewriting automatically, but we'll + // generate an error to help the user do it manually. + mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway + if mod != nil { + for _, mc := range mod.ModuleCalls { + if pathTraversesUp(mc.Source) { + packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) + newSubdir := filepath.Join(givenSubdir, mc.Source) + if pathTraversesUp(newSubdir) { + // This should never happen in any reasonable + // configuration since this suggests a path that + // traverses up out of the package root. We'll just + // ignore this, since we'll fail soon enough anyway + // trying to resolve this path when this module is + // loaded. + continue + } + + var newAddr = packageAddr + if newSubdir != "" { + newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Root module references parent directory", + fmt.Sprintf("The requested module %q refers to a module via its parent directory. 
To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), + )) + continue + } + } + } + + retManifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + continue + } + + if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { + // Ignore the *real* root module, whose key is empty, since + // we're only interested in the module named "root" and its + // descendents. + continue + } + + newKey := record.Key[len(initFromModuleRootKeyPrefix):] + instPath := filepath.Join(modulesDir, newKey) + tempPath := filepath.Join(instDir, record.Key) + + // tempPath won't be present for a module that was installed from + // a relative path, so in that case we just record the installation + // directory and assume it was already copied into place as part + // of its parent. + if _, err := os.Stat(tempPath); err != nil { + if !os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to stat temporary module install directory", + fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + var parentKey string + if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 { + parentKey = newKey[:lastDot] + } else { + parentKey = "" // parent is the root module + } + + parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey] + parentNew := retManifest[parentKey] + + // We need to figure out which portion of our directory is the + // parent package path and which portion is the subdirectory + // under that. + baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir) + if err != nil { + // Should never happen, because we constructed both directories + // from the same base and so they must have a common prefix. + panic(err) + } + + newDir := filepath.Join(parentNew.Dir, baseDirRel) + log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir) + newRecord := record // shallow copy + newRecord.Dir = newDir + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + continue + } + + err = os.MkdirAll(instPath, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create module install directory", + fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err), + )) + continue + } + + // We copy rather than "rename" here because renaming between directories + // can be tricky in edge-cases like network filesystems, etc. + log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath) + err := copyDir(instPath, tempPath) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy descendent module", + fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err), + )) + continue + } + + subDir, err := filepath.Rel(tempPath, record.Dir) + if err != nil { + // Should never happen, because we constructed both directories + // from the same base and so they must have a common prefix. 
+ panic(err) + } + + newRecord := record // shallow copy + newRecord.Dir = filepath.Join(instPath, subDir) + newRecord.Key = newKey + retManifest[newKey] = newRecord + hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir) + } + + retManifest.WriteSnapshotToDir(modulesDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write module manifest", + fmt.Sprintf("Error writing module manifest: %s.", err), + )) + } + + if !diags.HasErrors() { + // Try to clean up our temporary directory, but don't worry if we don't + // succeed since it shouldn't hurt anything. + os.RemoveAll(instDir) + } + + return diags +} + +func pathTraversesUp(path string) bool { + return strings.HasPrefix(filepath.ToSlash(path), "../") +} + +// installHooksInitDir is an adapter wrapper for an InstallHooks that +// does some fakery to make downloads look like they are happening in their +// final locations, rather than in the temporary loader we use. +// +// It also suppresses "Install" calls entirely, since InitDirFromModule +// does its own installation steps after the initial installation pass +// has completed. +type installHooksInitDir struct { + Wrapped ModuleInstallHooks + ModuleInstallHooksImpl +} + +func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { + if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { + // We won't announce the root module, since hook implementations + // don't expect to see that and the caller will usually have produced + // its own user-facing notification about what it's doing anyway. + return + } + + trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] + h.Wrapped.Download(trimAddr, packageAddr, version) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go new file mode 100644 index 000000000..8dc0374b1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/getter.go @@ -0,0 +1,204 @@ +package initwd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of Terraform's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable. 
+ +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "gcs": new(getter.GCSGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. +// +// The keys in a reusingGetter are resolved and trimmed source addresses +// (with a scheme always present, and without any "subdir" component), +// and the values are the paths where each source was previously installed. +type reusingGetter map[string]string + +// getWithGoGetter retrieves the package referenced in the given address +// into the installation path and then returns the full path to any subdir +// indicated in the address. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. 
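// Illustrative usage sketch (editorial, not part of the vendored file): the
// installer shares one reusingGetter across a whole walk so that two module
// calls resolving to the same package address trigger only one download; the
// addresses and install paths below are hypothetical.
func exampleReusingGetter() {
	g := reusingGetter{}

	// First call: go-getter downloads the package and the resolved
	// address -> install directory mapping is recorded in g.
	dirA, err := g.getWithGoGetter(".terraform/modules/a", "git::https://example.com/infra.git//modules/vpc")
	if err != nil {
		log.Printf("[ERROR] first install failed: %s", err)
		return
	}

	// Second call: the same resolved package address is found in g, so the
	// previously installed directory is copied rather than downloaded again.
	dirB, err := g.getWithGoGetter(".terraform/modules/b", "git::https://example.com/infra.git//modules/vpc")
	if err != nil {
		log.Printf("[ERROR] second install failed: %s", err)
		return
	}

	log.Printf("[TRACE] example modules installed at %s and %s", dirA, dirB)
}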
+func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { + packageAddr, subDir := splitAddrSubdir(addr) + + log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) + + realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) + if err != nil { + return "", err + } + + if isMaybeRelativeLocalPath(realAddr) { + return "", &MaybeRelativePathErr{addr} + } + + var realSubDir string + realAddr, realSubDir = splitAddrSubdir(realAddr) + if realSubDir != "" { + subDir = filepath.Join(realSubDir, subDir) + } + + if realAddr != packageAddr { + log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) + } + + if prevDir, exists := g[realAddr]; exists { + log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) + err := os.Mkdir(instPath, os.ModePerm) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) + } + err = copyDir(instPath, prevDir) + if err != nil { + return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) + } + } else { + log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) + client := getter.Client{ + Src: realAddr, + Dst: instPath, + Pwd: instPath, + + Mode: getter.ClientModeDir, + + Detectors: goGetterNoDetectors, // we already did detection above + Decompressors: goGetterDecompressors, + Getters: goGetterGetters, + } + err = client.Get() + if err != nil { + return "", err + } + // Remember where we installed this so we might reuse this directory + // on subsequent calls to avoid re-downloading. + g[realAddr] = instPath + } + + // Our subDir string can contain wildcards until this point, so that + // e.g. a subDir of * can expand to one top-level directory in a .tar.gz + // archive. Now that we've expanded the archive successfully we must + // resolve that into a concrete path. + var finalDir string + if subDir != "" { + finalDir, err = getter.SubdirGlob(instPath, subDir) + log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) + if err != nil { + return "", err + } + } else { + finalDir = instPath + } + + // If we got this far then we have apparently succeeded in downloading + // the requested object! + return filepath.Clean(finalDir), nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. +// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. 
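// Illustrative sketch (editorial, not part of the vendored file): the expected
// split for a couple of hypothetical addresses.
func exampleSplitAddrSubdir() {
	pkg, sub := splitAddrSubdir("git::https://example.com/network.git//modules/vpc")
	// pkg is "git::https://example.com/network.git", sub is "modules/vpc".
	log.Printf("[TRACE] package %q subdir %q", pkg, sub)

	pkg, sub = splitAddrSubdir("hashicorp/consul/aws")
	// No "//" separator present, so sub is returned empty.
	log.Printf("[TRACE] package %q subdir %q", pkg, sub)
}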
+func splitAddrSubdir(addr string) (packageAddr, subDir string) { + return getter.SourceDirSubdir(addr) +} + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) +} + +func isMaybeRelativeLocalPath(addr string) bool { + if strings.HasPrefix(addr, "file://") { + _, err := os.Stat(addr[7:]) + if err != nil { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go new file mode 100644 index 000000000..1150b093c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go new file mode 100644 index 000000000..30532f54a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go new file mode 100644 index 000000000..3ed58e4bf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package initwd + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go new file mode 100644 index 000000000..8e0557567 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install.go @@ -0,0 +1,558 @@ +package initwd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" + "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" + 
"github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +type ModuleInstaller struct { + modsDir string + reg *registry.Client +} + +func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller { + return &ModuleInstaller{ + modsDir: modsDir, + reg: reg, + } +} + +// InstallModules analyses the root module in the given directory and installs +// all of its direct and transitive dependencies into the given modules +// directory, which must already exist. +// +// Since InstallModules makes possibly-time-consuming calls to remote services, +// a hook interface is supported to allow the caller to be notified when +// each module is installed and, for remote modules, when downloading begins. +// LoadConfig guarantees that two hook calls will not happen concurrently but +// it does not guarantee any particular ordering of hook calls. This mechanism +// is for UI feedback only and does not give the caller any control over the +// process. +// +// If modules are already installed in the target directory, they will be +// skipped unless their source address or version have changed or unless +// the upgrade flag is set. +// +// InstallModules never deletes any directory, except in the case where it +// needs to replace a directory that is already present with a newly-extracted +// package. +// +// If the returned diagnostics contains errors then the module installation +// may have wholly or partially completed. Modules must be loaded in order +// to find their dependencies, so this function does many of the same checks +// as LoadConfig as a side-effect. +// +// If successful (the returned diagnostics contains no errors) then the +// first return value is the early configuration tree that was constructed by +// the installation process. +func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { + log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) + + rootMod, diags := earlyconfig.LoadModule(rootDir) + if rootMod == nil { + return nil, diags + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read modules manifest file", + fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), + )) + return nil, diags + } + + getter := reusingGetter{} + cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) + diags = append(diags, instDiags...) + + return cfg, diags +} + +func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if hooks == nil { + // Use our no-op implementation as a placeholder + hooks = ModuleInstallHooksImpl{} + } + + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. 
+ manifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + + cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( + func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + + key := manifest.ModuleKey(req.Path) + instPath := i.packageInstallPath(req.Path) + + log.Printf("[DEBUG] Module installer: begin %s", key) + + // First we'll check if we need to upgrade/replace an existing + // installed module, and delete it out of the way if so. + replace := upgrade + if !replace { + record, recorded := manifest[key] + switch { + case !recorded: + log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) + replace = true + case record.SourceAddr != req.SourceAddr: + log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) + replace = true + case record.Version != nil && !req.VersionConstraints.Check(record.Version): + log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) + replace = true + } + } + + // If we _are_ planning to replace this module, then we'll remove + // it now so our installation code below won't conflict with any + // existing remnants. + if replace { + if _, recorded := manifest[key]; recorded { + log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) + } + delete(manifest, key) + // Deleting a module invalidates all of its descendent modules too. + keyPrefix := key + "." + for subKey := range manifest { + if strings.HasPrefix(subKey, keyPrefix) { + if _, recorded := manifest[subKey]; recorded { + log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) + } + delete(manifest, subKey) + } + } + } + + record, recorded := manifest[key] + if !recorded { + // Clean up any stale cache directory that might be present. + // If this is a local (relative) source then the dir will + // not exist, but we'll ignore that. + log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) + err := os.RemoveAll(instPath) + if err != nil && !os.IsNotExist(err) { + log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to remove local module cache", + fmt.Sprintf( + "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", + instPath, err, + ), + )) + return nil, nil, diags + } + } else { + // If this module is already recorded and its root directory + // exists then we will just load what's already there and + // keep our existing record. + info, err := os.Stat(record.Dir) + if err == nil && info.IsDir() { + mod, mDiags := earlyconfig.LoadModule(record.Dir) + diags = diags.Append(mDiags) + + log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) + return mod, record.Version, diags + } + } + + // If we get down here then it's finally time to actually install + // the module. There are some variants to this process depending + // on what type of module source address we have. + switch { + + case isLocalSourceAddr(req.SourceAddr): + log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) + mod, mDiags := i.installLocalModule(req, key, manifest, hooks) + diags = append(diags, mDiags...) 
+ return mod, nil, diags + + case isRegistrySourceAddr(req.SourceAddr): + addr, err := regsrc.ParseModuleSource(req.SourceAddr) + if err != nil { + // Should never happen because isRegistrySourceAddr already validated + panic(err) + } + log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) + + mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) + diags = append(diags, mDiags...) + return mod, v, diags + + default: + log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) + + mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) + diags = append(diags, mDiags...) + return mod, nil, diags + } + + }, + )) + diags = append(diags, cDiags...) + + err := manifest.WriteSnapshotToDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update module manifest", + fmt.Sprintf("Unable to write the module manifest file: %s", err), + )) + } + + return cfg, diags +} + +func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + parentKey := manifest.ModuleKey(req.Parent.Path) + parentRecord, recorded := manifest[parentKey] + if !recorded { + // This is indicative of a bug rather than a user-actionable error + panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) + } + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + + // For local sources we don't actually need to modify the + // filesystem at all because the parent already wrote + // the files we need, and so we just load up what's already here. + newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) + + log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) + // it is possible that the local directory is a symlink + newDir, err := filepath.EvalSymlinks(newDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), + )) + } + + mod, mDiags := earlyconfig.LoadModule(newDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(mDiags) + } + + // Note the local location in our manifest. 
+ manifest[key] = modsdir.Record{ + Key: key, + Dir: newDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) + hooks.Install(key, nil, newDir) + + return mod, diags +} + +func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + hostname, err := addr.SvcHost() + if err != nil { + // If it looks like the user was trying to use punycode then we'll generate + // a specialized error for that case. We require the unicode form of + // hostname so that hostnames are always human-readable in configuration + // and punycode can't be used to hide a malicious module hostname. + if strings.HasPrefix(addr.RawHost.Raw, "xn--") { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + return nil, nil, diags + } + + reg := i.reg + + log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) + resp, err := reg.ModuleVersions(addr) + if err != nil { + if registry.IsModuleNotFound(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error accessing remote module registry", + fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), + )) + } + return nil, nil, diags + } + + // The response might contain information about dependencies to allow us + // to potentially optimize future requests, but we don't currently do that + // and so for now we'll just take the first item which is guaranteed to + // be the address we requested. + if len(resp.Modules) < 1 { + // Should never happen, but since this is a remote service that may + // be implemented by third-parties we will handle it gracefully. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, nil, diags + } + + modMeta := resp.Modules[0] + + var latestMatch *version.Version + var latestVersion *version.Version + for _, mv := range modMeta.Versions { + v, err := version.NewVersion(mv.Version) + if err != nil { + // Should never happen if the registry server is compliant with + // the protocol, but we'll warn if not to assist someone who + // might be developing a module registry server. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + continue + } + + // If we've found a pre-release version then we'll ignore it unless + // it was exactly requested. + if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { + log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) + continue + } + + if latestVersion == nil || v.GreaterThan(latestVersion) { + latestVersion = v + } + + if req.VersionConstraints.Check(v) { + if latestMatch == nil || v.GreaterThan(latestMatch) { + latestMatch = v + } + } + } + + if latestVersion == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module has no versions", + fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + return nil, nil, diags + } + + if latestMatch == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unresolvable module version constraint", + fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), + )) + return nil, nil, diags + } + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, latestMatch) + + // If we manage to get down here then we've found a suitable version to + // install, so we need to ask the registry where we should download it from. + // The response to this is a go-getter-style address string. + dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) + if err != nil { + log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) + + modDir, err := getter.getWithGoGetter(instPath, dlAddr) + if err != nil { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) + + if addr.RawSubmodule != "" { + // Append the user's requested subdirectory to any subdirectory that + // was implied by any of the nested layers we expanded within go-getter. + modDir = filepath.Join(modDir, addr.RawSubmodule) + } + + log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) + + // Finally we are ready to try actually loading the module. 
+ mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For registry modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Version: latestMatch, + Dir: modDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, latestMatch, modDir) + + return mod, latestMatch, diags +} + +func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, nil) + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, diags + } + + modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) + if err != nil { + if _, ok := err.(*MaybeRelativePathErr); ok { + log.Printf( + "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", + req.SourceAddr, + ) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf( + "The module address %q could not be resolved.\n\n"+ + "If you intended this as a path relative to the current "+ + "module, use \"./%s\" instead. The \"./\" prefix "+ + "indicates that the address is a relative filesystem path.", + req.SourceAddr, req.SourceAddr, + ), + )) + } else { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), + )) + } + return nil, diags + + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) + + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For go-getter modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. 
This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Dir: modDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, nil, modDir) + + return mod, diags +} + +func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { + return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go new file mode 100644 index 000000000..817a6dc83 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/module_install_hooks.go @@ -0,0 +1,36 @@ +package initwd + +import ( + version "github.com/hashicorp/go-version" +) + +// ModuleInstallHooks is an interface used to provide notifications about the +// installation process being orchestrated by InstallModules. +// +// This interface may have new methods added in future, so implementers should +// embed InstallHooksImpl to get no-op implementations of any unimplemented +// methods. +type ModuleInstallHooks interface { + // Download is called for modules that are retrieved from a remote source + // before that download begins, to allow a caller to give feedback + // on progress through a possibly-long sequence of downloads. + Download(moduleAddr, packageAddr string, version *version.Version) + + // Install is called for each module that is installed, even if it did + // not need to be downloaded from a remote source. + Install(moduleAddr string, version *version.Version, localPath string) +} + +// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that +// can be embedded in another implementation struct to allow only partial +// implementation of the interface. +type ModuleInstallHooksImpl struct { +} + +func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { +} + +func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { +} + +var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go new file mode 100644 index 000000000..8f89909c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/doc.go @@ -0,0 +1,5 @@ +// Package blocktoattr includes some helper functions that can perform +// preprocessing on a HCL body where a configschema.Block schema is available +// in order to allow list and set attributes defined in the schema to be +// optionally written by the user as block syntax. 
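+//
+// For example, given a hypothetical list-of-objects attribute named "rule",
+// the fixup allows both of the following forms to decode to the same value:
+//
+//	rule = [{ port = 80 }, { port = 443 }]
+//
+//	rule { port = 80 }
+//	rule { port = 443 }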
+package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go new file mode 100644 index 000000000..f782f6b75 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/fixup.go @@ -0,0 +1,187 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization +// functionality to allow attributes that are specified as having list or set +// type in the schema to be written with HCL block syntax as multiple nested +// blocks with the attribute name as the block type. +// +// This partially restores some of the block/attribute confusion from HCL 1 +// so that existing patterns that depended on that confusion can continue to +// be used in the short term while we settle on a longer-term strategy. +// +// Most of the fixup work is actually done when the returned body is +// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual +// decode of the body might not, if the content of the body is so ambiguous +// that there's no safe way to map it to the schema. +func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { + // The schema should never be nil, but in practice it seems to be sometimes + // in the presence of poorly-configured test mocks, so we'll be robust + // by synthesizing an empty one. + if schema == nil { + schema = &configschema.Block{} + } + + return &fixupBody{ + original: body, + schema: schema, + names: ambiguousNames(schema), + } +} + +type fixupBody struct { + original hcl.Body + schema *configschema.Block + names map[string]struct{} +} + +// Content decodes content from the body. The given schema must be the lower-level +// representation of the same schema that was previously passed to FixUpBlockAttrs, +// or else the result is undefined. +func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, diags := b.original.Content(schema) + return b.fixupContent(content), diags +} + +func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, remain, diags := b.original.PartialContent(schema) + remain = &fixupBody{ + original: remain, + schema: b.schema, + names: b.names, + } + return b.fixupContent(content), remain, diags +} + +func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // FixUpBlockAttrs is not intended to be used in situations where we'd use + // JustAttributes, so we just pass this through verbatim to complete our + // implementation of hcl.Body. + return b.original.JustAttributes() +} + +func (b *fixupBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} + +// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's +// content to determine whether the author has used attribute or block syntax +// for each of the ambigious attributes where both are permitted. +// +// The resulting schema will always contain all of the same names that are +// in the given schema, but some attribute schemas may instead be replaced by +// block header schemas. 
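+//
+// For example, if a hypothetical ambiguous name "rule" appears in the body
+// as one or more "rule" blocks rather than as a "rule" attribute, the derived
+// schema describes "rule" as a block type rather than as an attribute.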
+func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema { + return effectiveSchema(given, b.original, b.names, true) +} + +func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent { + var ret hcl.BodyContent + ret.Attributes = make(hcl.Attributes) + for name, attr := range content.Attributes { + ret.Attributes[name] = attr + } + blockAttrVals := make(map[string][]*hcl.Block) + for _, block := range content.Blocks { + if _, exists := b.names[block.Type]; exists { + // If we get here then we've found a block type whose instances need + // to be re-interpreted as a list-of-objects attribute. We'll gather + // those up and fix them up below. + blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block) + continue + } + + // We need to now re-wrap our inner body so it will be subject to the + // same attribute-as-block fixup when recursively decoded. + retBlock := *block // shallow copy + if blockS, ok := b.schema.BlockTypes[block.Type]; ok { + // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then + retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block) + } + + ret.Blocks = append(ret.Blocks, &retBlock) + } + // No we'll install synthetic attributes for each of our fixups. We can't + // do this exactly because HCL's information model expects an attribute + // to be a single decl but we have multiple separate blocks. We'll + // approximate things, then, by using only our first block for the source + // location information. (We are guaranteed at least one by the above logic.) + for name, blocks := range blockAttrVals { + ret.Attributes[name] = &hcl.Attribute{ + Name: name, + Expr: &fixupBlocksExpr{ + blocks: blocks, + ety: b.schema.Attributes[name].Type.ElementType(), + }, + + Range: blocks[0].DefRange, + NameRange: blocks[0].TypeRange, + } + } + return &ret +} + +type fixupBlocksExpr struct { + blocks hcl.Blocks + ety cty.Type +} + +func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // In order to produce a suitable value for our expression we need to + // now decode the whole descendent block structure under each of our block + // bodies. + // + // That requires us to do something rather strange: we must construct a + // synthetic block type schema derived from the element type of the + // attribute, thus inverting our usual direction of lowering a schema + // into an implied type. Because a type is less detailed than a schema, + // the result is imprecise and in particular will just consider all + // the attributes to be optional and let the provider eventually decide + // whether to return errors if they turn out to be null when required. + schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety + spec := schema.DecoderSpec() + + vals := make([]cty.Value, len(e.blocks)) + var diags hcl.Diagnostics + for i, block := range e.blocks { + body := FixUpBlockAttrs(block.Body, schema) + val, blockDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, blockDiags...) + if val == cty.NilVal { + val = cty.UnknownVal(e.ety) + } + vals[i] = val + } + if len(vals) == 0 { + return cty.ListValEmpty(e.ety), diags + } + return cty.ListVal(vals), diags +} + +func (e *fixupBlocksExpr) Variables() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Variables(block.Body, spec)...) 
+ } + return ret +} + +func (e *fixupBlocksExpr) Range() hcl.Range { + // This is not really an appropriate range for the expression but it's + // the best we can do from here. + return e.blocks[0].DefRange +} + +func (e *fixupBlocksExpr) StartRange() hcl.Range { + return e.blocks[0].DefRange +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go new file mode 100644 index 000000000..129ee0e82 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/schema.go @@ -0,0 +1,119 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. 
+ ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go new file mode 100644 index 000000000..f5ed1c539 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr/variables.go @@ -0,0 +1,45 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. +func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.IsCollectionType() && attrS.Type.ElementType().IsObjectType() { + // ☝️Check for collection type before element type, because if this is a mis-placed reference, + // a panic here will prevent other useful diags from being elevated to show the user what to fix + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) 
+ } + } + + return vars +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go new file mode 100644 index 000000000..13f7ed935 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/data.go @@ -0,0 +1,34 @@ +package lang + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the Terraform codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. +type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResource(addrs.Resource, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go new file mode 100644 index 000000000..af5c5cac0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/doc.go @@ -0,0 +1,5 @@ +// Package lang deals with the runtime aspects of Terraform's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. 
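+//
+// The Scope type is the main entry point: it evaluates expressions and
+// blocks against values supplied through the Data interface.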
+package lang diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go new file mode 100644 index 000000000..ec48a873f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/eval.go @@ -0,0 +1,473 @@ +package lang + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/ext/dynblock" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(evalDiags) + + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. 
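+//
+// For example (illustrative), calling EvalExpr with wantType cty.String
+// converts the result to a string where possible and reports any conversion
+// failure as an error diagnostic.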
+// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(evalDiags) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + }) + } + } + + return val, diags +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. + ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. +func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(refs, s.SelfAddr) +} + +func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + vals := make(map[string]cty.Value) + funcs := s.Functions() + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: funcs, + } + + if len(refs) == 0 { + // Easy path for common case where there are no references at all. + return ctx, diags + } + + // First we'll do static validation of the references. 
This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. + if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { + diags = diags.Append(staticDiags) + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. + // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. + dataResources := map[string]map[string]cty.Value{} + managedResources := map[string]map[string]cty.Value{} + wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} + moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} + inputVariables := map[string]cty.Value{} + localValues := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + terraformAttrs := map[string]cty.Value{} + countAttrs := map[string]cty.Value{} + forEachAttrs := map[string]cty.Value{} + var self cty.Value + + for _, ref := range refs { + rng := ref.SourceRange + + rawSubj := ref.Subject + if rawSubj == addrs.Self { + if selfAddr == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + continue + } + + if selfAddr == addrs.Self { + // Programming error: the self address cannot alias itself. + panic("scope SelfAddr attempting to alias itself") + } + + // self can only be used within a resource instance + subj := selfAddr.(addrs.ResourceInstance) + + val, valDiags := normalizeRefValue(s.Data.GetResource(subj.ContainingResource(), rng)) + + diags = diags.Append(valDiags) + + // Self is an exception in that it must always resolve to a + // particular instance. We will still insert the full resource into + // the context below. + switch k := subj.Key.(type) { + case addrs.IntKey: + self = val.Index(cty.NumberIntVal(int64(k))) + case addrs.StringKey: + self = val.Index(cty.StringVal(string(k))) + default: + self = val + } + + continue + } + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs, however we are removing the possibility of + // ResourceInstance beforehand. 
+ if addr, ok := rawSubj.(addrs.ResourceInstance); ok { + rawSubj = addr.ContainingResource() + } + + switch subj := rawSubj.(type) { + case addrs.Resource: + var into map[string]map[string]cty.Value + switch subj.Mode { + case addrs.ManagedResourceMode: + into = managedResources + case addrs.DataResourceMode: + into = dataResources + default: + panic(fmt.Errorf("unsupported ResourceMode %s", subj.Mode)) + } + + val, valDiags := normalizeRefValue(s.Data.GetResource(subj, rng)) + diags = diags.Append(valDiags) + + r := subj + if into[r.Type] == nil { + into[r.Type] = make(map[string]cty.Value) + } + into[r.Type][r.Name] = val + + case addrs.ModuleCallInstance: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) + diags = diags.Append(valDiags) + + if wholeModules[subj.Call.Name] == nil { + wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) + } + wholeModules[subj.Call.Name][subj.Key] = val + + case addrs.ModuleCallOutput: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) + diags = diags.Append(valDiags) + + callName := subj.Call.Call.Name + callKey := subj.Call.Key + if moduleOutputs[callName] == nil { + moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) + } + if moduleOutputs[callName][callKey] == nil { + moduleOutputs[callName][callKey] = make(map[string]cty.Value) + } + moduleOutputs[callName][callKey][subj.Name] = val + + case addrs.InputVariable: + val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) + diags = diags.Append(valDiags) + inputVariables[subj.Name] = val + + case addrs.LocalValue: + val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) + diags = diags.Append(valDiags) + localValues[subj.Name] = val + + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + + case addrs.CountAttr: + val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) + diags = diags.Append(valDiags) + countAttrs[subj.Name] = val + + case addrs.ForEachAttr: + val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng)) + diags = diags.Append(valDiags) + forEachAttrs[subj.Name] = val + + default: + // Should never happen + panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) + } + } + + for k, v := range buildResourceObjects(managedResources) { + vals[k] = v + } + vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) + vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) + vals["var"] = cty.ObjectVal(inputVariables) + vals["local"] = cty.ObjectVal(localValues) + vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + vals["count"] = cty.ObjectVal(countAttrs) + vals["each"] = cty.ObjectVal(forEachAttrs) + if self != cty.NilVal { + vals["self"] = self + } + + return ctx, diags +} + +func buildResourceObjects(resources map[string]map[string]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + for typeName, nameVals := range resources { + vals[typeName] = cty.ObjectVal(nameVals) + } + return vals +} + +func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value { + vals 
:= make(map[string]cty.Value) + + for name, keys := range wholeModules { + vals[name] = buildInstanceObjects(keys) + } + + for name, keys := range moduleOutputs { + if _, exists := wholeModules[name]; exists { + // If we also have a whole module value for this name then we'll + // skip this since the individual outputs are embedded in that result. + continue + } + + // The shape of this collection isn't compatible with buildInstanceObjects, + // but rather than replicating most of the buildInstanceObjects logic + // here we'll instead first transform the structure to be what that + // function expects and then use it. This is a little wasteful, but + // we do not expect this these maps to be large and so the extra work + // here should not hurt too much. + flattened := make(map[addrs.InstanceKey]cty.Value, len(keys)) + for k, vals := range keys { + flattened[k] = cty.ObjectVal(vals) + } + vals[name] = buildInstanceObjects(flattened) + } + + return vals +} + +func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value { + if val, exists := keys[addrs.NoKey]; exists { + // If present, a "no key" value supersedes all other values, + // since they should be embedded inside it. + return val + } + + // If we only have individual values then we need to construct + // either a list or a map, depending on what sort of keys we + // have. + haveInt := false + haveString := false + maxInt := 0 + + for k := range keys { + switch tk := k.(type) { + case addrs.IntKey: + haveInt = true + if int(tk) > maxInt { + maxInt = int(tk) + } + case addrs.StringKey: + haveString = true + } + } + + // We should either have ints or strings and not both, but + // if we have both then we'll prefer strings and let the + // language interpreter try to convert the int keys into + // strings in a map. + switch { + case haveString: + vals := make(map[string]cty.Value) + for k, v := range keys { + switch tk := k.(type) { + case addrs.StringKey: + vals[string(tk)] = v + case addrs.IntKey: + sk := strconv.Itoa(int(tk)) + vals[sk] = v + } + } + return cty.ObjectVal(vals) + case haveInt: + // We'll make a tuple that is long enough for our maximum + // index value. It doesn't matter if we end up shorter than + // the number of instances because if length(...) were + // being evaluated we would've got a NoKey reference and + // thus not ended up in this codepath at all. + vals := make([]cty.Value, maxInt+1) + for i := range vals { + if v, exists := keys[addrs.IntKey(i)]; exists { + vals[i] = v + } else { + // Just a placeholder, since nothing will access this anyway + vals[i] = cty.DynamicVal + } + } + return cty.TupleVal(vals) + default: + // Should never happen because there are no other key types. + log.Printf("[ERROR] strange makeInstanceObjects call with no supported key types") + return cty.EmptyObjectVal + } +} + +func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { + if diags.HasErrors() { + // If there are errors then we will force an unknown result so that + // we can still evaluate and catch type errors but we'll avoid + // producing redundant re-statements of the same errors we've already + // dealt with here. 
+ return cty.UnknownVal(val.Type()), diags + } + return val, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go new file mode 100644 index 000000000..8c0751489 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/cidr.go @@ -0,0 +1,218 @@ +package funcs + +import ( + "fmt" + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CidrHostFunc contructs a function that calculates a full host IP address +// within a given IP network address prefix. +var CidrHostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.Host(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// CidrNetmaskFunc contructs a function that converts an IPv4 address prefix given +// in CIDR notation into a subnet mask address. +var CidrNetmaskFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + return cty.StringVal(net.IP(network.Mask).String()), nil + }, +}) + +// CidrSubnetFunc contructs a function that calculates a subnet address within +// a given IP network address prefix. +var CidrSubnetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "newbits", + Type: cty.Number, + }, + { + Name: "netnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var newbits int + if err := gocty.FromCtyValue(args[1], &newbits); err != nil { + return cty.UnknownVal(cty.String), err + } + var netnum int + if err := gocty.FromCtyValue(args[2], &netnum); err != nil { + return cty.UnknownVal(cty.String), err + } + + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) 
+ if newbits > 32 { + return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") + } + + newNetwork, err := cidr.Subnet(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// CidrSubnetsFunc is similar to CidrSubnetFunc but calculates many consecutive +// subnet addresses at once, rather than just a single subnet extension. +var CidrSubnetsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "newbits", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), function.NewArgErrorf(0, "invalid CIDR expression: %s", err) + } + startPrefixLen, _ := network.Mask.Size() + + prefixLengthArgs := args[1:] + if len(prefixLengthArgs) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + var firstLength int + if err := gocty.FromCtyValue(prefixLengthArgs[0], &firstLength); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(1, err) + } + firstLength += startPrefixLen + + retVals := make([]cty.Value, len(prefixLengthArgs)) + + current, _ := cidr.PreviousSubnet(network, firstLength) + for i, lengthArg := range prefixLengthArgs { + var length int + if err := gocty.FromCtyValue(lengthArg, &length); err != nil { + return cty.UnknownVal(cty.String), function.NewArgError(i+1, err) + } + + if length < 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "must extend prefix by at least one bit") + } + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if length > 32 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "may not extend prefix by more than 32 bits") + } + length += startPrefixLen + if length > (len(network.IP) * 8) { + protocol := "IP" + switch len(network.IP) * 8 { + case 32: + protocol = "IPv4" + case 128: + protocol = "IPv6" + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "would extend prefix to %d bits, which is too long for an %s address", length, protocol) + } + + next, rollover := cidr.NextSubnet(current, length) + if rollover || !network.Contains(next.IP) { + // If we run out of suffix bits in the base CIDR prefix then + // NextSubnet will start incrementing the prefix bits, which + // we don't allow because it would then allocate addresses + // outside of the caller's given prefix. + return cty.UnknownVal(cty.String), function.NewArgErrorf(i+1, "not enough remaining address space for a subnet with a prefix of %d bits after %s", length, current.String()) + } + + current = next + retVals[i] = cty.StringVal(current.String()) + } + + return cty.ListVal(retVals), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. 
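// A minimal, self-contained sketch (values are illustrative) of the go-cidr
// calls that CidrHostFunc and CidrSubnetFunc above delegate to, outside of
// the cty function machinery.
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, network, _ := net.ParseCIDR("10.0.0.0/16")

	// cidrhost("10.0.0.0/16", 5) -> "10.0.0.5"
	host, _ := cidr.Host(network, 5)
	fmt.Println(host)

	// cidrsubnet("10.0.0.0/16", 8, 2) -> "10.0.2.0/24"
	subnet, _ := cidr.Subnet(network, 8, 2)
	fmt.Println(subnet)
}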
+func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} + +// CidrSubnets calculates a sequence of consecutive subnet prefixes that may +// be of different prefix lengths under a common base prefix. +func CidrSubnets(prefix cty.Value, newbits ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(newbits)+1) + args[0] = prefix + copy(args[1:], newbits) + return CidrSubnetsFunc.Call(args) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go new file mode 100644 index 000000000..e6898457b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/collection.go @@ -0,0 +1,1519 @@ +package funcs + +import ( + "errors" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var ElementFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + list := args[0] + listTy := list.Type() + switch { + case listTy.IsListType(): + return listTy.ElementType(), nil + case listTy.IsTupleType(): + if !args[1].IsKnown() { + // If the index isn't known yet then we can't predict the + // result type since each tuple element can have its own type. + return cty.DynamicPseudoType, nil + } + + etys := listTy.TupleElementTypes() + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // e.g. fractional number where whole number is required + return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) + } + if len(etys) == 0 { + return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") + } + index = index % len(etys) + return etys[index], nil + default: + return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // can't happen because we checked this in the Type function above + return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) + } + + if !args[0].IsKnown() { + return cty.UnknownVal(retType), nil + } + + l := args[0].LengthInt() + if l == 0 { + return cty.DynamicVal, errors.New("cannot use element function with an empty list") + } + index = index % l + + // We did all the necessary type checks in the type function above, + // so this is guaranteed not to fail. 
+ return args[0].Index(cty.NumberIntVal(int64(index))), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + collTy := args[0].Type() + switch { + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + return cty.Number, nil + default: + return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + coll := args[0] + collTy := args[0].Type() + switch { + case collTy == cty.DynamicPseudoType: + return cty.UnknownVal(cty.Number), nil + case collTy.IsTupleType(): + l := len(collTy.TupleElementTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy.IsObjectType(): + l := len(collTy.AttributeTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy == cty.String: + // We'll delegate to the cty stdlib strlen function here, because + // it deals with all of the complexities of tokenizing unicode + // grapheme clusters. + return stdlib.Strlen(coll) + case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): + return coll.Length(), nil + default: + // Should never happen, because of the checks in our Type func above + return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") + } + }, +}) + +// CoalesceFunc constructs a function that takes any number of arguments and +// returns the first one that isn't empty. This function was copied from go-cty +// stdlib and modified so that it returns the first *non-empty* non-null element +// from a sequence, instead of merely the first non-null. +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + // We already know this will succeed because of the checks in our Type func above + argVal, _ = convert.Convert(argVal, retType) + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + if retType == cty.String && argVal.RawEquals(cty.StringVal("")) { + continue + } + + return argVal, nil + } + return cty.NilVal, errors.New("no non-null, non-empty-string arguments") + }, +}) + +// CoalesceListFunc constructs a function that takes any number of list arguments +// and returns the first one that isn't empty. 
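// A plain-Go sketch of the rule CoalesceFunc above implements: arguments are
// scanned in order and the first one that is neither null nor an empty string
// wins. A *string stands in for a nullable cty string here; the values are
// illustrative.
package main

import (
	"errors"
	"fmt"
)

func coalesce(vals ...*string) (string, error) {
	for _, v := range vals {
		if v == nil || *v == "" {
			continue // skip nulls and empty strings, as CoalesceFunc does
		}
		return *v, nil
	}
	return "", errors.New("no non-null, non-empty-string arguments")
}

func main() {
	empty, fallback := "", "default"
	v, _ := coalesce(nil, &empty, &fallback)
	fmt.Println(v) // "default"
}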
+var CoalesceListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, errors.New("at least one argument is required") + } + + argTypes := make([]cty.Type, len(args)) + + for i, arg := range args { + // if any argument is unknown, we can't be certain know which type we will return + if !arg.IsKnown() { + return cty.DynamicPseudoType, nil + } + ty := arg.Type() + + if !ty.IsListType() && !ty.IsTupleType() { + return cty.NilType, errors.New("coalescelist arguments must be lists or tuples") + } + + argTypes[i] = arg.Type() + } + + last := argTypes[0] + // If there are mixed types, we have to return a dynamic type. + for _, next := range argTypes[1:] { + if !next.Equals(last) { + return cty.DynamicPseudoType, nil + } + } + + return last, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, arg := range args { + if !arg.IsKnown() { + // If we run into an unknown list at some point, we can't + // predict the final result yet. (If there's a known, non-empty + // arg before this then we won't get here.) + return cty.UnknownVal(retType), nil + } + + if arg.LengthInt() > 0 { + return arg, nil + } + } + + return cty.NilVal, errors.New("no non-null arguments") + }, +}) + +// CompactFunc constructs a function that takes a list of strings and returns a new list +// with any empty string elements removed. +var CompactFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + if !listVal.IsWhollyKnown() { + // If some of the element values aren't known yet then we + // can't yet return a compacted list + return cty.UnknownVal(retType), nil + } + + var outputList []cty.Value + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + if v.IsNull() || v.AsString() == "" { + continue + } + outputList = append(outputList, v) + } + + if len(outputList) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + return cty.ListVal(outputList), nil + }, +}) + +// ContainsFunc constructs a function that determines whether a given list or +// set contains a given single value as one of its elements. +var ContainsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + arg := args[0] + ty := arg.Type() + + if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { + return cty.NilVal, errors.New("argument must be list, tuple, or set") + } + + _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) + if err != nil { + return cty.False, nil + } + + return cty.True, nil + }, +}) + +// IndexFunc constructs a function that finds the element index for a given value in a list. 
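// A small self-contained sketch of the go-cty function API used throughout
// this file: a function.Spec pairs a Type function (or a static return type)
// with an Impl, and the resulting Function is invoked via Call. The "double"
// function here is purely illustrative and not part of the vendored package.
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

var doubleFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "num", Type: cty.Number},
	},
	Type: function.StaticReturnType(cty.Number),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		// Multiply operates directly on cty number values.
		return args[0].Multiply(cty.NumberIntVal(2)), nil
	},
})

func main() {
	v, err := doubleFunc.Call([]cty.Value{cty.NumberIntVal(21)})
	fmt.Println(v.AsBigFloat(), err) // 42 <nil>
}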
+var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// DistinctFunc constructs a function that takes a list and returns a new list +// with any duplicate elements removed. +var DistinctFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + + if !listVal.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + var list []cty.Value + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + list, err = appendIfMissing(list, v) + if err != nil { + return cty.NilVal, err + } + } + + if len(list) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(list), nil + }, +}) + +// ChunklistFunc constructs a function that splits a single list into fixed-size chunks, +// returning a list of lists. +var ChunklistFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "size", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.List(args[0].Type()), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + if !listVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + + if listVal.LengthInt() == 0 { + return cty.ListValEmpty(listVal.Type()), nil + } + + var size int + err = gocty.FromCtyValue(args[1], &size) + if err != nil { + return cty.NilVal, fmt.Errorf("invalid index: %s", err) + } + + if size < 0 { + return cty.NilVal, errors.New("the size argument must be positive") + } + + output := make([]cty.Value, 0) + + // if size is 0, returns a list made of the initial list + if size == 0 { + output = append(output, listVal) + return cty.ListVal(output), nil + } + + chunk := make([]cty.Value, 0) + + l := args[0].LengthInt() + i := 0 + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + chunk = append(chunk, v) + + // Chunk when index isn't 0, or when reaching the values's length + if (i+1)%size == 0 || (i+1) == l { + output = append(output, cty.ListVal(chunk)) + chunk = make([]cty.Value, 0) + } + i++ + } + + return cty.ListVal(output), nil + }, +}) + +// FlattenFunc constructs a function that takes a list and replaces any elements +// that are lists with a flattened sequence of the list contents. 
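// A plain-Go sketch of the chunking loop ChunklistFunc above uses: elements
// are appended to the current chunk, and the chunk is flushed every `size`
// elements or at the end of the input. The size is assumed positive and the
// values are illustrative.
package main

import "fmt"

func chunklist(list []string, size int) [][]string {
	var out [][]string
	var chunk []string
	for i, v := range list {
		chunk = append(chunk, v)
		if (i+1)%size == 0 || i+1 == len(list) {
			out = append(out, chunk)
			chunk = nil
		}
	}
	return out
}

func main() {
	fmt.Println(chunklist([]string{"a", "b", "c", "d", "e"}, 2))
	// [[a b] [c d] [e]]
}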
+var FlattenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsWhollyKnown() { + return cty.DynamicPseudoType, nil + } + + argTy := args[0].Type() + if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() { + return cty.NilType, errors.New("can only flatten lists, sets and tuples") + } + + retVal, known := flattener(args[0]) + if !known { + return cty.DynamicPseudoType, nil + } + + tys := make([]cty.Type, len(retVal)) + for i, ty := range retVal { + tys[i] = ty.Type() + } + return cty.Tuple(tys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputList := args[0] + if inputList.LengthInt() == 0 { + return cty.EmptyTupleVal, nil + } + + out, known := flattener(inputList) + if !known { + return cty.UnknownVal(retType), nil + } + + return cty.TupleVal(out), nil + }, +}) + +// Flatten until it's not a cty.List, and return whether the value is known. +// We can flatten lists with unknown values, as long as they are not +// lists themselves. +func flattener(flattenList cty.Value) ([]cty.Value, bool) { + out := make([]cty.Value, 0) + for it := flattenList.ElementIterator(); it.Next(); { + _, val := it.Element() + if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { + if !val.IsKnown() { + return out, false + } + + res, known := flattener(val) + if !known { + return res, known + } + out = append(out, res...) + } else { + out = append(out, val) + } + } + return out, true +} + +// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. +var KeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsMapType(): + return cty.List(cty.String), nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + // All of our result elements will be strings, and atys just + // decides how many there are. + etys := make([]cty.Type, len(atys)) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + m := args[0] + var keys []cty.Value + + switch { + case m.Type().IsObjectType(): + // In this case we allow unknown values so we must work only with + // the attribute _types_, not with the value itself. + var names []string + for name := range m.Type().AttributeTypes() { + names = append(names, name) + } + sort.Strings(names) // same ordering guaranteed by cty's ElementIterator + if len(names) == 0 { + return cty.EmptyTupleVal, nil + } + keys = make([]cty.Value, len(names)) + for i, name := range names { + keys[i] = cty.StringVal(name) + } + return cty.TupleVal(keys), nil + default: + if !m.IsKnown() { + return cty.UnknownVal(retType), nil + } + + // cty guarantees that ElementIterator will iterate in lexicographical + // order by key. 
+ for it := args[0].ElementIterator(); it.Next(); { + k, _ := it.Element() + keys = append(keys, k) + } + if len(keys) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(keys), nil + } + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. +// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, errors.New("at least one argument is required") + } + + argTypes := make([]cty.Type, len(args)) + + for i, arg := range args { + argTypes[i] = arg.Type() + } + + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.List(retType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + newList := make([]cty.Value, 0, len(args)) + + for _, arg := range args { + // We already know this will succeed because of the checks in our Type func above + arg, _ = convert.Convert(arg, retType.ElementType()) + newList = append(newList, arg) + } + + return cty.ListVal(newList), nil + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. +var LookupFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "default", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 1 || len(args) > 3 { + return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) + } + + ty := args[0].Type() + + switch { + case ty.IsObjectType(): + if !args[1].IsKnown() { + return cty.DynamicPseudoType, nil + } + + key := args[1].AsString() + if ty.HasAttribute(key) { + return args[0].GetAttr(key).Type(), nil + } else if len(args) == 3 { + // if the key isn't found but a default is provided, + // return the default type + return args[2].Type(), nil + } + return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) + case ty.IsMapType(): + if len(args) == 3 { + _, err = convert.Convert(args[2], ty.ElementType()) + if err != nil { + return cty.NilType, function.NewArgErrorf(2, "the default value must have the same type as the map elements") + } + } + return ty.ElementType(), nil + default: + return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var defaultVal cty.Value + defaultValueSet := false + + if len(args) == 3 { + defaultVal = args[2] + defaultValueSet = true + } + + mapVar := args[0] + lookupKey := args[1].AsString() + + if !mapVar.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + if mapVar.Type().IsObjectType() { + if mapVar.Type().HasAttribute(lookupKey) { + return mapVar.GetAttr(lookupKey), nil + } + } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { + return mapVar.Index(cty.StringVal(lookupKey)), nil + 
} + + if defaultValueSet { + defaultVal, err = convert.Convert(defaultVal, retType) + if err != nil { + return cty.NilVal, err + } + return defaultVal, nil + } + + return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf( + "lookup failed to find '%s'", lookupKey) + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 2 || len(args)%2 != 0 { + return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) + } + + argTypes := make([]cty.Type, len(args)/2) + index := 0 + + for i := 0; i < len(args); i += 2 { + argTypes[index] = args[i+1].Type() + index++ + } + + valType, _ := convert.UnifyUnsafe(argTypes) + if valType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.Map(valType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + } + + outputMap := make(map[string]cty.Value) + + for i := 0; i < len(args); i += 2 { + + key := args[i].AsString() + + err := gocty.FromCtyValue(args[i], &key) + if err != nil { + return cty.NilVal, err + } + + val := args[i+1] + + var variable cty.Value + err = gocty.FromCtyValue(val, &variable) + if err != nil { + return cty.NilVal, err + } + + // We already know this will succeed because of the checks in our Type func above + variable, _ = convert.Convert(variable, retType.ElementType()) + + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return cty.MapVal(outputMap), nil + }, +}) + +// MatchkeysFunc constructs a function that constructs a new list by taking a +// subset of elements from one list whose indexes match the corresponding +// indexes of values in another list. +var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. 
+ // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. + if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// MergeFunc constructs a function that takes an arbitrary number of maps and +// returns a single map that contains a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +var MergeFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "maps", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.DynamicPseudoType), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + outputMap := make(map[string]cty.Value) + + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { + return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) + } + for it := arg.ElementIterator(); it.Next(); { + k, v := it.Element() + outputMap[k.AsString()] = v + } + } + return cty.ObjectVal(outputMap), nil + }, +}) + +// ReverseFunc takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. 
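// A plain-Go sketch of the precedence rule MergeFunc above applies: maps are
// visited in argument order, so a key defined by a later map overwrites the
// value taken from an earlier one. Values are illustrative.
package main

import "fmt"

func merge(maps ...map[string]string) map[string]string {
	out := make(map[string]string)
	for _, m := range maps {
		for k, v := range m {
			out[k] = v // later arguments win
		}
	}
	return out
}

func main() {
	a := map[string]string{"env": "dev", "owner": "team-a"}
	b := map[string]string{"env": "prod"}
	fmt.Println(merge(a, b)) // map[env:prod owner:team-a]
}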
+var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + argTy := args[0].Type() + switch { + case argTy.IsTupleType(): + argTys := argTy.TupleElementTypes() + retTys := make([]cty.Type, len(argTys)) + for i, ty := range argTys { + retTys[len(retTys)-i-1] = ty + } + return cty.Tuple(retTys), nil + case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list + return cty.List(argTy.ElementType()), nil + default: + return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + in := args[0].AsValueSlice() + outVals := make([]cty.Value, len(in)) + for i, v := range in { + outVals[len(outVals)-i-1] = v + } + switch { + case retType.IsTupleType(): + return cty.TupleVal(outVals), nil + default: + if len(outVals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(outVals), nil + } + }, +}) + +// SetProductFunc calculates the cartesian product of two or more sets or +// sequences. If the arguments are all lists then the result is a list of tuples, +// preserving the ordering of all of the input lists. Otherwise the result is a +// set of tuples. +var SetProductFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "sets", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (retType cty.Type, err error) { + if len(args) < 2 { + return cty.NilType, errors.New("at least two arguments are required") + } + + listCount := 0 + elemTys := make([]cty.Type, len(args)) + for i, arg := range args { + aty := arg.Type() + switch { + case aty.IsSetType(): + elemTys[i] = aty.ElementType() + case aty.IsListType(): + elemTys[i] = aty.ElementType() + listCount++ + case aty.IsTupleType(): + // We can accept a tuple type only if there's some common type + // that all of its elements can be converted to. + allEtys := aty.TupleElementTypes() + if len(allEtys) == 0 { + elemTys[i] = cty.DynamicPseudoType + listCount++ + break + } + ety, _ := convert.UnifyUnsafe(allEtys) + if ety == cty.NilType { + return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") + } + elemTys[i] = ety + listCount++ + default: + return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") + } + } + + if listCount == len(args) { + return cty.List(cty.Tuple(elemTys)), nil + } + return cty.Set(cty.Tuple(elemTys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + ety := retType.ElementType() + + total := 1 + for _, arg := range args { + // Because of our type checking function, we are guaranteed that + // all of the arguments are known, non-null values of types that + // support LengthInt. + total *= arg.LengthInt() + } + + if total == 0 { + // If any of the arguments was an empty collection then our result + // is also an empty collection, which we'll short-circuit here. 
+ if retType.IsListType() { + return cty.ListValEmpty(ety), nil + } + return cty.SetValEmpty(ety), nil + } + + subEtys := ety.TupleElementTypes() + product := make([][]cty.Value, total) + + b := make([]cty.Value, total*len(args)) + n := make([]int, len(args)) + s := 0 + argVals := make([][]cty.Value, len(args)) + for i, arg := range args { + argVals[i] = arg.AsValueSlice() + } + + for i := range product { + e := s + len(args) + pi := b[s:e] + product[i] = pi + s = e + + for j, n := range n { + val := argVals[j][n] + ty := subEtys[j] + if !val.Type().Equals(ty) { + var err error + val, err = convert.Convert(val, ty) + if err != nil { + // Should never happen since we checked this in our + // type-checking function. + return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) + } + } + pi[j] = val + } + + for j := len(n) - 1; j >= 0; j-- { + n[j]++ + if n[j] < len(argVals[j]) { + break + } + n[j] = 0 + } + } + + productVals := make([]cty.Value, total) + for i, vals := range product { + productVals[i] = cty.TupleVal(vals) + } + + if retType.IsListType() { + return cty.ListVal(productVals), nil + } + return cty.SetVal(productVals), nil + }, +}) + +// SliceFunc constructs a function that extracts some consecutive elements +// from within a list. +var SliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "start_index", + Type: cty.Number, + }, + { + Name: "end_index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + arg := args[0] + argTy := arg.Type() + + if argTy.IsSetType() { + return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important") + } + if !argTy.IsListType() && !argTy.IsTupleType() { + return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value") + } + + startIndex, endIndex, idxsKnown, err := sliceIndexes(args) + if err != nil { + return cty.NilType, err + } + + if argTy.IsListType() { + return argTy, nil + } + + if !idxsKnown { + // If we don't know our start/end indices then we can't predict + // the result type if we're planning to return a tuple. + return cty.DynamicPseudoType, nil + } + return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputList := args[0] + + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + // we ignore idxsKnown return value here because the indices are always + // known here, or else the call would've short-circuited. 
+ startIndex, endIndex, _, err := sliceIndexes(args) + if err != nil { + return cty.NilVal, err + } + + if endIndex-startIndex == 0 { + if retType.IsTupleType() { + return cty.EmptyTupleVal, nil + } + return cty.ListValEmpty(retType.ElementType()), nil + } + + outputList := inputList.AsValueSlice()[startIndex:endIndex] + + if retType.IsTupleType() { + return cty.TupleVal(outputList), nil + } + + return cty.ListVal(outputList), nil + }, +}) + +func sliceIndexes(args []cty.Value) (int, int, bool, error) { + var startIndex, endIndex, length int + var startKnown, endKnown, lengthKnown bool + + if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known + length = args[0].LengthInt() + lengthKnown = true + } + + if args[1].IsKnown() { + if err := gocty.FromCtyValue(args[1], &startIndex); err != nil { + return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err) + } + if startIndex < 0 { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero") + } + if lengthKnown && startIndex > length { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list") + } + startKnown = true + } + if args[2].IsKnown() { + if err := gocty.FromCtyValue(args[2], &endIndex); err != nil { + return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err) + } + if endIndex < 0 { + return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero") + } + if lengthKnown && endIndex > length { + return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list") + } + endKnown = true + } + if startKnown && endKnown { + if startIndex > endIndex { + return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index") + } + } + return startIndex, endIndex, startKnown && endKnown, nil +} + +// TransposeFunc contructs a function that takes a map of lists of strings and +// TransposeFunc constructs a function that takes a map of lists of strings and +// swaps the keys and values to produce a new map of lists of strings. 
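// A plain-Go sketch of the key/value swap TransposeFunc below performs: each
// string in an input list becomes a key of the result, collecting (in sorted
// order) the input keys under which it appeared. Values are illustrative.
package main

import (
	"fmt"
	"sort"
)

func transpose(in map[string][]string) map[string][]string {
	out := make(map[string][]string)
	for inKey, vals := range in {
		for _, v := range vals {
			out[v] = append(out[v], inKey)
			sort.Strings(out[v])
		}
	}
	return out
}

func main() {
	fmt.Println(transpose(map[string][]string{
		"a": {"1", "2"},
		"b": {"2", "3"},
	}))
	// map[1:[a] 2:[a b] 3:[b]]
}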
+var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ValuesFunc constructs a function that returns a list of the map values, +// in the order of the sorted keys. +var ValuesFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + ty := args[0].Type() + if ty.IsMapType() { + return cty.List(ty.ElementType()), nil + } else if ty.IsObjectType() { + // The result is a tuple type with all of the same types as our + // object type's attributes, sorted in lexicographical order by the + // keys. (This matches the sort order guaranteed by ElementIterator + // on a cty object value.) + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + attrNames := make([]string, 0, len(atys)) + for name := range atys { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + + tys := make([]cty.Type, len(attrNames)) + for i, name := range attrNames { + tys[i] = atys[name] + } + return cty.Tuple(tys), nil + } + return cty.NilType, errors.New("values() requires a map as the first argument") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + mapVar := args[0] + + // We can just iterate the map/object value here because cty guarantees + // that these types always iterate in key lexicographical order. + var values []cty.Value + for it := mapVar.ElementIterator(); it.Next(); { + _, val := it.Element() + values = append(values, val) + } + + if retType.IsTupleType() { + return cty.TupleVal(values), nil + } + if len(values) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(values), nil + }, +}) + +// ZipmapFunc constructs a function that constructs a map from a list of keys +// and a corresponding list of values. 
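// A plain-Go sketch of the pairing ZipmapFunc below performs: the i-th key is
// associated with the i-th value, and the two lists must have the same
// length. Values are illustrative.
package main

import "fmt"

func zipmap(keys, values []string) (map[string]string, error) {
	if len(keys) != len(values) {
		return nil, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keys), len(values))
	}
	out := make(map[string]string, len(keys))
	for i, k := range keys {
		out[k] = values[i]
	}
	return out, nil
}

func main() {
	m, _ := zipmap([]string{"a", "b"}, []string{"1", "2"})
	fmt.Println(m) // map[a:1 b:2]
}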
+var ZipmapFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "keys", + Type: cty.List(cty.String), + }, + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + keys := args[0] + values := args[1] + valuesTy := values.Type() + + switch { + case valuesTy.IsListType(): + return cty.Map(values.Type().ElementType()), nil + case valuesTy.IsTupleType(): + if !keys.IsWhollyKnown() { + // Since zipmap with a tuple produces an object, we need to know + // all of the key names before we can predict our result type. + return cty.DynamicPseudoType, nil + } + + keysRaw := keys.AsValueSlice() + valueTypesRaw := valuesTy.TupleElementTypes() + if len(keysRaw) != len(valueTypesRaw) { + return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) + } + atys := make(map[string]cty.Type, len(valueTypesRaw)) + for i, keyVal := range keysRaw { + if keyVal.IsNull() { + return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) + } + key := keyVal.AsString() + atys[key] = valueTypesRaw[i] + } + return cty.Object(atys), nil + + default: + return cty.NilType, errors.New("values argument must be a list or tuple value") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + keys := args[0] + values := args[1] + + if !keys.IsWhollyKnown() { + // Unknown map keys and object attributes are not supported, so + // our entire result must be unknown in this case. + return cty.UnknownVal(retType), nil + } + + // both keys and values are guaranteed to be shallowly-known here, + // because our declared params above don't allow unknown or null values. + if keys.LengthInt() != values.LengthInt() { + return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) + } + + output := make(map[string]cty.Value) + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, v := it.Element() + val := values.Index(cty.NumberIntVal(int64(i))) + output[v.AsString()] = val + i++ + } + + switch { + case retType.IsMapType(): + if len(output) == 0 { + return cty.MapValEmpty(retType.ElementType()), nil + } + return cty.MapVal(output), nil + case retType.IsObjectType(): + return cty.ObjectVal(output), nil + default: + // Should never happen because the type-check function should've + // caught any other case. + return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) + } + }, +}) + +// helper function to add an element to a list, if it does not already exist +func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { + for _, ele := range slice { + eq, err := stdlib.Equal(ele, element) + if err != nil { + return slice, err + } + if eq.True() { + return slice, nil + } + } + return append(slice, element), nil +} + +// Element returns a single element from a given list at the given index. If +// index is greater than the length of the list then it is wrapped modulo +// the list length. +func Element(list, index cty.Value) (cty.Value, error) { + return ElementFunc.Call([]cty.Value{list, index}) +} + +// Length returns the number of elements in the given collection or number of +// Unicode characters in the given string. 
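// A hypothetical caller's view of the thin wrappers in this file (Element,
// Length, Lookup, and the rest): each simply forwards cty values to the
// corresponding Function's Call method. The import below is the vendored
// internal package added by this diff and is not importable outside the SDK
// module, so this is an illustration only.
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	n, _ := funcs.Length(list)
	fmt.Println(n.AsBigFloat()) // 2

	m := cty.MapVal(map[string]cty.Value{"env": cty.StringVal("prod")})
	v, _ := funcs.Lookup(m, cty.StringVal("region"), cty.StringVal("eu"))
	fmt.Println(v.AsString()) // "eu", since "region" is absent and a default was given
}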
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} + +// Coalesce takes any number of arguments and returns the first one that isn't empty. +func Coalesce(args ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(args) +} + +// CoalesceList takes any number of list arguments and returns the first one that isn't empty. +func CoalesceList(args ...cty.Value) (cty.Value, error) { + return CoalesceListFunc.Call(args) +} + +// Compact takes a list of strings and returns a new list +// with any empty string elements removed. +func Compact(list cty.Value) (cty.Value, error) { + return CompactFunc.Call([]cty.Value{list}) +} + +// Contains determines whether a given list contains a given single value +// as one of its elements. +func Contains(list, value cty.Value) (cty.Value, error) { + return ContainsFunc.Call([]cty.Value{list, value}) +} + +// Index finds the element index for a given value in a list. +func Index(list, value cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{list, value}) +} + +// Distinct takes a list and returns a new list with any duplicate elements removed. +func Distinct(list cty.Value) (cty.Value, error) { + return DistinctFunc.Call([]cty.Value{list}) +} + +// Chunklist splits a single list into fixed-size chunks, returning a list of lists. +func Chunklist(list, size cty.Value) (cty.Value, error) { + return ChunklistFunc.Call([]cty.Value{list, size}) +} + +// Flatten takes a list and replaces any elements that are lists with a flattened +// sequence of the list contents. +func Flatten(list cty.Value) (cty.Value, error) { + return FlattenFunc.Call([]cty.Value{list}) +} + +// Keys takes a map and returns a sorted list of the map keys. +func Keys(inputMap cty.Value) (cty.Value, error) { + return KeysFunc.Call([]cty.Value{inputMap}) +} + +// List takes any number of list arguments and returns a list containing those +// values in the same order. +func List(args ...cty.Value) (cty.Value, error) { + return ListFunc.Call(args) +} + +// Lookup performs a dynamic lookup into a map. +// There are two required arguments, map and key, plus an optional default, +// which is a value to return if no key is found in map. +func Lookup(args ...cty.Value) (cty.Value, error) { + return LookupFunc.Call(args) +} + +// Map takes an even number of arguments and returns a map whose elements are constructed +// from consecutive pairs of arguments. +func Map(args ...cty.Value) (cty.Value, error) { + return MapFunc.Call(args) +} + +// Matchkeys constructs a new list by taking a subset of elements from one list +// whose indexes match the corresponding indexes of values in another list. +func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { + return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) +} + +// Merge takes an arbitrary number of maps and returns a single map that contains +// a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +func Merge(maps ...cty.Value) (cty.Value, error) { + return MergeFunc.Call(maps) +} + +// Reverse takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. +func Reverse(list cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{list}) +} + +// SetProduct computes the cartesian product of sets or sequences. 
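// A plain-Go sketch of the counter ("odometer") technique SetProductFunc
// above uses to enumerate the cartesian product: the index slice n is
// incremented from the rightmost position, carrying over whenever a position
// wraps. Values are illustrative.
package main

import "fmt"

func setproduct(sets ...[]string) [][]string {
	total := 1
	for _, s := range sets {
		total *= len(s)
	}
	product := make([][]string, 0, total)
	n := make([]int, len(sets))
	for i := 0; i < total; i++ {
		row := make([]string, len(sets))
		for j, idx := range n {
			row[j] = sets[j][idx]
		}
		product = append(product, row)
		// Advance the odometer: rightmost index first, carrying leftwards.
		for j := len(n) - 1; j >= 0; j-- {
			n[j]++
			if n[j] < len(sets[j]) {
				break
			}
			n[j] = 0
		}
	}
	return product
}

func main() {
	fmt.Println(setproduct([]string{"a", "b"}, []string{"1", "2", "3"}))
	// [[a 1] [a 2] [a 3] [b 1] [b 2] [b 3]]
}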
+func SetProduct(sets ...cty.Value) (cty.Value, error) { + return SetProductFunc.Call(sets) +} + +// Slice extracts some consecutive elements from within a list. +func Slice(list, start, end cty.Value) (cty.Value, error) { + return SliceFunc.Call([]cty.Value{list, start, end}) +} + +// Transpose takes a map of lists of strings and swaps the keys and values to +// produce a new map of lists of strings. +func Transpose(values cty.Value) (cty.Value, error) { + return TransposeFunc.Call([]cty.Value{values}) +} + +// Values returns a list of the map values, in the order of the sorted keys. +// This function only works on flat maps. +func Values(values cty.Value) (cty.Value, error) { + return ValuesFunc.Call([]cty.Value{values}) +} + +// Zipmap constructs a map from a list of keys and a corresponding list of values. +func Zipmap(keys, values cty.Value) (cty.Value, error) { + return ZipmapFunc.Call([]cty.Value{keys, values}) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go new file mode 100644 index 000000000..83f859797 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/conversion.go @@ -0,0 +1,87 @@ +package funcs + +import ( + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. + switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. 
+ return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := args[0].Type() + switch { + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go new file mode 100644 index 000000000..28074fb13 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/crypto.go @@ -0,0 +1,325 @@ +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "fmt" + "hash" + + uuidv5 "github.com/google/uuid" + uuid "github.com/hashicorp/go-uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NameSpaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NameSpaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NameSpaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NameSpaceX500 + default: + if namespace, err = uuidv5.Parse(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewSHA1(namespace, []byte(val)).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. 
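// A plain-Go sketch of the hash-then-encode composition the functions below
// are built from via makeStringHashFunction: the input string is hashed and
// the raw digest is then encoded, here with both the hex and standard Base64
// encodings that these definitions pair with. The input string is
// illustrative.
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

func main() {
	h := sha256.New()
	h.Write([]byte("hello"))
	sum := h.Sum(nil)

	fmt.Println(hex.EncodeToString(sum))                // as sha256("hello") would return
	fmt.Println(base64.StdEncoding.EncodeToString(sum)) // as base64sha256("hello") would return
}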
+var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString) +} + +// Base64Sha512Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. +var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString) + +// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileBase64Sha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString) +} + +// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher. +var BcryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "cost", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + defaultCost := 10 + + if len(args) > 1 { + var val int + if err := gocty.FromCtyValue(args[1], &val); err != nil { + return cty.UnknownVal(cty.String), err + } + defaultCost = val + } + + if len(args) > 2 { + return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments") + } + + input := args[0].AsString() + out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("error occured generating password %s", err.Error()) + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits. +var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString) + +// MakeFileMd5Func constructs a function that is like Md5Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileMd5Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString) +} + +// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext. +var RsaDecryptFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "ciphertext", + Type: cty.String, + }, + { + Name: "privatekey", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + key := args[1].AsString() + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return cty.UnknownVal(cty.String), fmt.Errorf( + "failed to parse key: password protected keys are not supported. 
Please decrypt the key prior to use", + ) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(string(out)), nil + }, +}) + +// Sha1Func contructs a function that computes the SHA1 hash of a given string +// and encodes it with hexadecimal digits. +var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString) + +// MakeFileSha1Func constructs a function that is like Sha1Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha1Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString) +} + +// Sha256Func contructs a function that computes the SHA256 hash of a given string +// and encodes it with hexadecimal digits. +var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString) + +// MakeFileSha256Func constructs a function that is like Sha256Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha256Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString) +} + +// Sha512Func contructs a function that computes the SHA512 hash of a given string +// and encodes it with hexadecimal digits. +var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString) + +// MakeFileSha512Func constructs a function that is like Sha512Func but reads the +// contents of a file rather than hashing a given literal string. +func MakeFileSha512Func(baseDir string) function.Function { + return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString) +} + +func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + s := args[0].AsString() + h := hf() + h.Write([]byte(s)) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + h := hf() + h.Write(src) + rv := enc(h.Sum(nil)) + return cty.StringVal(rv), nil + }, + }) +} + +// UUID generates and returns a Type-4 UUID in the standard hexadecimal string +// format. +// +// This is not a pure function: it will generate a different result for each +// call. It must therefore be registered as an impure function in the function +// table in the "lang" package. +func UUID() (cty.Value, error) { + return UUIDFunc.Call(nil) +} + +// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string +// format. +func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) { + return UUIDV5Func.Call([]cty.Value{namespace, name}) +} + +// Base64Sha256 computes the SHA256 hash of a given string and encodes it with +// Base64. 
+//
+// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha256(str cty.Value) (cty.Value, error) {
+	return Base64Sha256Func.Call([]cty.Value{str})
+}
+
+// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha512(str cty.Value) (cty.Value, error) {
+	return Base64Sha512Func.Call([]cty.Value{str})
+}
+
+// Bcrypt computes a hash of the given string using the Blowfish cipher,
+// returning a string in the Modular Crypt Format
+// usually expected in the shadow password file on many Unix systems.
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(cost)+1)
+	args[0] = str
+	copy(args[1:], cost)
+	return BcryptFunc.Call(args)
+}
+
+// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+func Md5(str cty.Value) (cty.Value, error) {
+	return Md5Func.Call([]cty.Value{str})
+}
+
+// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
+// cleartext.
+func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
+	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
+}
+
+// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
+func Sha1(str cty.Value) (cty.Value, error) {
+	return Sha1Func.Call([]cty.Value{str})
+}
+
+// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
+func Sha256(str cty.Value) (cty.Value, error) {
+	return Sha256Func.Call([]cty.Value{str})
+}
+
+// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
+func Sha512(str cty.Value) (cty.Value, error) {
+	return Sha512Func.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go
new file mode 100644
index 000000000..5dae19877
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/datetime.go
@@ -0,0 +1,70 @@
+package funcs
+
+import (
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// TimestampFunc constructs a function that returns a string representation of the current date and time.
+var TimestampFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	Type:   function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
+	},
+})
+
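The cty functions in this package can be exercised directly from Go, which is useful when reasoning about their behaviour. The following is a minimal sketch (an editorial illustration, not part of the vendored files; the funcs package is internal to the SDK, so the import path only resolves from inside it):

package main

import (
	"fmt"
	"time"

	funcs "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
)

func main() {
	// timestamp() takes no arguments and is impure: each call re-reads the clock.
	v, err := funcs.Timestamp()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AsString()) // e.g. "2019-11-27T16:04:05Z"

	// The result uses the same RFC 3339 layout the implementation formats with,
	// so it parses back cleanly.
	if _, err := time.Parse(time.RFC3339, v.AsString()); err != nil {
		panic(err)
	}
}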
+// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
+var TimeAddFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "timestamp",
+			Type: cty.String,
+		},
+		{
+			Name: "duration",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		ts, err := time.Parse(time.RFC3339, args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		duration, err := time.ParseDuration(args[1].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
+	},
+})
+
+// Timestamp returns a string representation of the current date and time.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
+// returns a string in this format.
+func Timestamp() (cty.Value, error) {
+	return TimestampFunc.Call([]cty.Value{})
+}
+
+// TimeAdd adds a duration to a timestamp, returning a new timestamp.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// `duration` is a string representation of a time difference, consisting of
+// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
+// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
+// number may be negative to indicate a negative duration, like `"-2h5m"`.
+//
+// The result is a string, also in RFC 3339 format, representing the result
+// of adding the given duration to the given timestamp.
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go
new file mode 100644
index 000000000..af93f08dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/encoding.go
@@ -0,0 +1,140 @@
+package funcs
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"net/url"
+	"unicode/utf8"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
+var Base64DecodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+		sDec, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
+		}
+		if !utf8.Valid([]byte(sDec)) {
+			log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
+			return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
+		}
+		return cty.StringVal(string(sDec)), nil
+	},
+})
+
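A short sketch of the decoding behaviour just described, including the error path for input that is not valid Base64 (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	funcs "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// "aGVsbG8=" is the standard-alphabet Base64 encoding of "hello".
	v, err := funcs.Base64Decode(cty.StringVal("aGVsbG8="))
	fmt.Println(v.AsString(), err) // hello <nil>

	// Input that is not valid Base64 yields an error rather than a value.
	if _, err := funcs.Base64Decode(cty.StringVal("not base64!")); err != nil {
		fmt.Println(err) // failed to decode base64 data 'not base64!'
	}
}

+// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.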
+var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in +// Base64 encoding. +var Base64GzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) + } + if err := gz.Flush(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) + } + if err := gz.Close(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) + } + return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil + }, +}) + +// URLEncodeFunc constructs a function that applies URL encoding to a given string. +var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// Base64Decode decodes a string containing a base64 sequence. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func Base64Encode(str cty.Value) (cty.Value, error) { + return Base64EncodeFunc.Call([]cty.Value{str}) +} + +// Base64Gzip compresses a string with gzip and then encodes the result in +// Base64 encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. +func Base64Gzip(str cty.Value) (cty.Value, error) { + return Base64GzipFunc.Call([]cty.Value{str}) +} + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". 
+// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go new file mode 100644 index 000000000..786d3e74b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/filesystem.go @@ -0,0 +1,360 @@ +package funcs + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) + } + return cty.StringVal(string(src)), nil + } + }, + }) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. +// +// As a special exception, a referenced template file may not recursively call +// the templatefile function, since that would risk the same file being +// included into itself indefinitely. +func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(fn string) (hcl.Expression, error) { + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. 
+ tmplVal, err := File(baseDir, cty.StringVal(fn)) + if err != nil { + return nil, err + } + + expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return nil, diags + } + + return expr, nil + } + + renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) { + if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) { + return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time + } + + ctx := &hcl.EvalContext{ + Variables: varsVal.AsValueMap(), + } + + // We'll pre-check references in the template here so we can give a + // more specialized error message than HCL would by default, so it's + // clearer that this problem is coming from a templatefile call. + for _, traversal := range expr.Variables() { + root := traversal.RootName() + if _, ok := ctx.Variables[root]; !ok { + return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange()) + } + } + + givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems + funcs := make(map[string]function.Function, len(givenFuncs)) + for name, fn := range givenFuncs { + if name == "templatefile" { + // We stub this one out to prevent recursive calls. + funcs[name] = function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call") + }, + }) + continue + } + funcs[name] = fn + } + ctx.Functions = funcs + + val, diags := expr.Value(ctx) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + return val, nil + } + + return function.New(&function.Spec{ + Params: params, + Type: func(args []cty.Value) (cty.Type, error) { + if !(args[0].IsKnown() && args[1].IsKnown()) { + return cty.DynamicPseudoType, nil + } + + // We'll render our template now to see what result type it produces. + // A template consisting only of a single interpolation an potentially + // return any type. + expr, err := loadTmpl(args[0].AsString()) + if err != nil { + return cty.DynamicPseudoType, err + } + + // This is safe even if args[1] contains unknowns because the HCL + // template renderer itself knows how to short-circuit those. 
+ val, err := renderTmpl(expr, args[1]) + return val.Type(), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + expr, err := loadTmpl(args[0].AsString()) + if err != nil { + return cty.DynamicVal, err + } + return renderTmpl(expr, args[1]) + }, + }) + +} + +// MakeFileExistsFunc constructs a function that takes a path +// and determines whether a file exists at that path +func MakeFileExistsFunc(baseDir string) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + path, err := homedir.Expand(path) + if err != nil { + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return cty.False, nil + } + return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path) + } + + if fi.Mode().IsRegular() { + return cty.True, nil + } + + return cty.False, fmt.Errorf("%s is not a regular file, but %q", + path, fi.Mode().String()) + }, + }) +} + +// BasenameFunc constructs a function that takes a string containing a filesystem path +// and removes all except the last portion from it. +var BasenameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Base(args[0].AsString())), nil + }, +}) + +// DirnameFunc constructs a function that takes a string containing a filesystem path +// and removes the last portion from it. +var DirnameFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(filepath.Dir(args[0].AsString())), nil + }, +}) + +// AbsPathFunc constructs a function that converts a filesystem path to an absolute path +var AbsPathFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + absPath, err := filepath.Abs(args[0].AsString()) + return cty.StringVal(filepath.ToSlash(absPath)), err + }, +}) + +// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory. 
+var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func readFileBytes(baseDir, path string) ([]byte, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + src, err := ioutil.ReadFile(path) + if err != nil { + // ReadFile does not return Terraform-user-friendly error + // messages, so we'll provide our own. + if os.IsNotExist(err) { + return nil, fmt.Errorf("no file exists at %s", path) + } + return nil, fmt.Errorf("failed to read %s", path) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. 
+func Dirname(path cty.Value) (cty.Value, error) {
+	return DirnameFunc.Call([]cty.Value{path})
+}
+
+// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
+// the current user's home directory path.
+//
+// The underlying function implementation works only with the path string and does not access the filesystem itself.
+// It is therefore unable to take into account filesystem features such as symlinks.
+//
+// If the leading segment in the path is not `~` then the given path is returned unmodified.
+func Pathexpand(path cty.Value) (cty.Value, error) {
+	return PathExpandFunc.Call([]cty.Value{path})
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go
new file mode 100644
index 000000000..c813f47bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/number.go
@@ -0,0 +1,217 @@
+package funcs
+
+import (
+	"math"
+	"math/big"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// CeilFunc constructs a function that returns the closest whole number greater
+// than or equal to the given value.
+var CeilFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Ceil(val))), nil
+	},
+})
+
+// FloorFunc constructs a function that returns the closest whole number less
+// than or equal to the given value.
+var FloorFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Floor(val))), nil
+	},
+})
+
+// LogFunc constructs a function that returns the logarithm of a given number in a given base.
+var LogFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var base float64
+		if err := gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
+	},
+})
+
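A rough usage sketch for these numeric helpers (illustrative only, not part of the vendored file; the wrapper functions it calls are defined near the end of this file):

package main

import (
	"fmt"

	funcs "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// ceil(4.1) rounds up to the next whole number.
	v, _ := funcs.Ceil(cty.NumberFloatVal(4.1))
	fmt.Println(v.AsBigFloat()) // 5

	// log(16, 2) is computed as ln(16)/ln(2), so the result is 4
	// (possibly with a tiny floating-point error).
	v, _ = funcs.Log(cty.NumberIntVal(16), cty.NumberIntVal(2))
	fmt.Println(v.AsBigFloat())
}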
+// PowFunc constructs a function that raises a number to a given power.
+var PowFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "power",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var power float64
+		if err := gocty.FromCtyValue(args[1], &power); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Pow(num, power)), nil
+	},
+})
+
+// SignumFunc constructs a function that determines the sign of the given
+// number, returning -1, 0, or +1 accordingly.
+var SignumFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num int
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		switch {
+		case num < 0:
+			return cty.NumberIntVal(-1), nil
+		case num > 0:
+			return cty.NumberIntVal(+1), nil
+		default:
+			return cty.NumberIntVal(0), nil
+		}
+	},
+})
+
+// ParseIntFunc constructs a function that parses a string argument and returns an integer of the specified base.
+var ParseIntFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "number",
+			Type: cty.DynamicPseudoType,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+
+	Type: func(args []cty.Value) (cty.Type, error) {
+		if !args[0].Type().Equals(cty.String) {
+			return cty.Number, function.NewArgErrorf(0, "first argument must be a string, not %s", args[0].Type().FriendlyName())
+		}
+		return cty.Number, nil
+	},
+
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		var numstr string
+		var base int
+		var err error
+
+		if err = gocty.FromCtyValue(args[0], &numstr); err != nil {
+			return cty.UnknownVal(cty.String), function.NewArgError(0, err)
+		}
+
+		if err = gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.Number), function.NewArgError(1, err)
+		}
+
+		if base < 2 || base > 62 {
+			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
+				1,
+				"base must be a whole number between 2 and 62 inclusive",
+			)
+		}
+
+		num, ok := (&big.Int{}).SetString(numstr, base)
+		if !ok {
+			return cty.UnknownVal(cty.Number), function.NewArgErrorf(
+				0,
+				"cannot parse %q as a base %d integer",
+				numstr,
+				base,
+			)
+		}
+
+		parsedNum := cty.NumberVal((&big.Float{}).SetInt(num))
+
+		return parsedNum, nil
+	},
+})
+
+// Ceil returns the closest whole number greater than or equal to the given value.
+func Ceil(num cty.Value) (cty.Value, error) {
+	return CeilFunc.Call([]cty.Value{num})
+}
+
+// Floor returns the closest whole number less than or equal to the given value.
+func Floor(num cty.Value) (cty.Value, error) {
+	return FloorFunc.Call([]cty.Value{num})
+}
+
+// Log returns the logarithm of a given number in a given base.
+func Log(num, base cty.Value) (cty.Value, error) {
+	return LogFunc.Call([]cty.Value{num, base})
+}
+
+// Pow returns the result of raising a number to a given power.
+func Pow(num, power cty.Value) (cty.Value, error) {
+	return PowFunc.Call([]cty.Value{num, power})
+}
+
+// Signum determines the sign of a number, returning a number between -1 and
+// 1 to represent the sign.
+func Signum(num cty.Value) (cty.Value, error) { + return SignumFunc.Call([]cty.Value{num}) +} + +// ParseInt parses a string argument and returns an integer of the specified base. +func ParseInt(num cty.Value, base cty.Value) (cty.Value, error) { + return ParseIntFunc.Call([]cty.Value{num, base}) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go new file mode 100644 index 000000000..c9ddf19e3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs/string.go @@ -0,0 +1,280 @@ +package funcs + +import ( + "fmt" + "regexp" + "sort" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +var JoinFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "separator", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "lists", + Type: cty.List(cty.String), + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + sep := args[0].AsString() + listVals := args[1:] + if len(listVals) < 1 { + return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required") + } + + l := 0 + for _, list := range listVals { + if !list.IsWhollyKnown() { + return cty.UnknownVal(cty.String), nil + } + l += list.LengthInt() + } + + items := make([]string, 0, l) + for ai, list := range listVals { + ei := 0 + for it := list.ElementIterator(); it.Next(); { + _, val := it.Element() + if val.IsNull() { + if len(listVals) > 1 { + return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1) + } + return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei) + } + items = append(items, val.AsString()) + ei++ + } + } + + return cty.StringVal(strings.Join(items, sep)), nil + }, +}) + +var SortFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + listVal := args[0] + + if !listVal.IsWhollyKnown() { + // If some of the element values aren't known yet then we + // can't yet preduct the order of the result. 
+ return cty.UnknownVal(retType), nil + } + if listVal.LengthInt() == 0 { // Easy path + return listVal, nil + } + + list := make([]string, 0, listVal.LengthInt()) + for it := listVal.ElementIterator(); it.Next(); { + iv, v := it.Element() + if v.IsNull() { + return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String()) + } + list = append(list, v.AsString()) + } + + sort.Strings(list) + retVals := make([]cty.Value, len(list)) + for i, s := range list { + retVals[i] = cty.StringVal(s) + } + return cty.ListVal(retVals), nil + }, +}) + +var SplitFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "separator", + Type: cty.String, + }, + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + sep := args[0].AsString() + str := args[1].AsString() + elems := strings.Split(str, sep) + elemVals := make([]cty.Value, len(elems)) + for i, s := range elems { + elemVals[i] = cty.StringVal(s) + } + if len(elemVals) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(elemVals), nil + }, +}) + +// ChompFunc constructions a function that removes newline characters at the end of a string. +var ChompFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`) + return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil + }, +}) + +// IndentFunc constructions a function that adds a given number of spaces to the +// beginnings of all but the first line in a given multi-line string. +var IndentFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "spaces", + Type: cty.Number, + }, + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var spaces int + if err := gocty.FromCtyValue(args[0], &spaces); err != nil { + return cty.UnknownVal(cty.String), err + } + data := args[1].AsString() + pad := strings.Repeat(" ", spaces) + return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil + }, +}) + +// ReplaceFunc constructions a function that searches a given string for another +// given substring, and replaces each occurence with a given replacement string. +var ReplaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + { + Name: "substr", + Type: cty.String, + }, + { + Name: "replace", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + str := args[0].AsString() + substr := args[1].AsString() + replace := args[2].AsString() + + // We search/replace using a regexp if the string is surrounded + // in forward slashes. 
+ if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' { + re, err := regexp.Compile(substr[1 : len(substr)-1]) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(re.ReplaceAllString(str, replace)), nil + } + + return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil + }, +}) + +// TitleFunc constructions a function that converts the first letter of each word +// in the given string to uppercase. +var TitleFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.StringVal(strings.Title(args[0].AsString())), nil + }, +}) + +// TrimSpaceFunc constructions a function that removes any space characters from +// the start and end of the given string. +var TrimSpaceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil + }, +}) + +// Join concatenates together the string elements of one or more lists with a +// given separator. +func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, len(lists)+1) + args[0] = sep + copy(args[1:], lists) + return JoinFunc.Call(args) +} + +// Sort re-orders the elements of a given list of strings so that they are +// in ascending lexicographical order. +func Sort(list cty.Value) (cty.Value, error) { + return SortFunc.Call([]cty.Value{list}) +} + +// Split divides a given string by a given separator, returning a list of +// strings containing the characters between the separator sequences. +func Split(sep, str cty.Value) (cty.Value, error) { + return SplitFunc.Call([]cty.Value{sep, str}) +} + +// Chomp removes newline characters at the end of a string. +func Chomp(str cty.Value) (cty.Value, error) { + return ChompFunc.Call([]cty.Value{str}) +} + +// Indent adds a given number of spaces to the beginnings of all but the first +// line in a given multi-line string. +func Indent(spaces, str cty.Value) (cty.Value, error) { + return IndentFunc.Call([]cty.Value{spaces, str}) +} + +// Replace searches a given string for another given substring, +// and replaces all occurences with a given replacement string. +func Replace(str, substr, replace cty.Value) (cty.Value, error) { + return ReplaceFunc.Call([]cty.Value{str, substr, replace}) +} + +// Title converts the first letter of each word in the given string to uppercase. +func Title(str cty.Value) (cty.Value, error) { + return TitleFunc.Call([]cty.Value{str}) +} + +// TrimSpace removes any space characters from the start and end of the given string. 
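The slash-delimited regexp behaviour of Replace described above can be seen in a small sketch like this one (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	funcs "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Plain substring replacement.
	v, _ := funcs.Replace(cty.StringVal("hello world"), cty.StringVal("world"), cty.StringVal("there"))
	fmt.Println(v.AsString()) // hello there

	// Wrapping the search string in forward slashes switches to regexp matching.
	v, _ = funcs.Replace(cty.StringVal("a1b2c3"), cty.StringVal("/[0-9]/"), cty.StringVal("-"))
	fmt.Println(v.AsString()) // a-b-c-
}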
+func TrimSpace(str cty.Value) (cty.Value, error) { + return TrimSpaceFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go new file mode 100644 index 000000000..a3c490664 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/functions.go @@ -0,0 +1,146 @@ +package lang + +import ( + ctyyaml "github.com/zclconf/go-cty-yaml" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/funcs" +) + +var impureFunctions = []string{ + "bcrypt", + "timestamp", + "uuid", +} + +// Functions returns the set of functions that should be used to when evaluating +// expressions in the receiving scope. +func (s *Scope) Functions() map[string]function.Function { + s.funcsLock.Lock() + if s.funcs == nil { + // Some of our functions are just directly the cty stdlib functions. + // Others are implemented in the subdirectory "funcs" here in this + // repository. New functions should generally start out their lives + // in the "funcs" directory and potentially graduate to cty stdlib + // later if the functionality seems to be something domain-agnostic + // that would be useful to all applications using cty functions. + + s.funcs = map[string]function.Function{ + "abs": stdlib.AbsoluteFunc, + "abspath": funcs.AbsPathFunc, + "basename": funcs.BasenameFunc, + "base64decode": funcs.Base64DecodeFunc, + "base64encode": funcs.Base64EncodeFunc, + "base64gzip": funcs.Base64GzipFunc, + "base64sha256": funcs.Base64Sha256Func, + "base64sha512": funcs.Base64Sha512Func, + "bcrypt": funcs.BcryptFunc, + "ceil": funcs.CeilFunc, + "chomp": funcs.ChompFunc, + "cidrhost": funcs.CidrHostFunc, + "cidrnetmask": funcs.CidrNetmaskFunc, + "cidrsubnet": funcs.CidrSubnetFunc, + "cidrsubnets": funcs.CidrSubnetsFunc, + "coalesce": funcs.CoalesceFunc, + "coalescelist": funcs.CoalesceListFunc, + "compact": funcs.CompactFunc, + "concat": stdlib.ConcatFunc, + "contains": funcs.ContainsFunc, + "csvdecode": stdlib.CSVDecodeFunc, + "dirname": funcs.DirnameFunc, + "distinct": funcs.DistinctFunc, + "element": funcs.ElementFunc, + "chunklist": funcs.ChunklistFunc, + "file": funcs.MakeFileFunc(s.BaseDir, false), + "fileexists": funcs.MakeFileExistsFunc(s.BaseDir), + "filebase64": funcs.MakeFileFunc(s.BaseDir, true), + "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir), + "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir), + "filemd5": funcs.MakeFileMd5Func(s.BaseDir), + "filesha1": funcs.MakeFileSha1Func(s.BaseDir), + "filesha256": funcs.MakeFileSha256Func(s.BaseDir), + "filesha512": funcs.MakeFileSha512Func(s.BaseDir), + "flatten": funcs.FlattenFunc, + "floor": funcs.FloorFunc, + "format": stdlib.FormatFunc, + "formatdate": stdlib.FormatDateFunc, + "formatlist": stdlib.FormatListFunc, + "indent": funcs.IndentFunc, + "index": funcs.IndexFunc, + "join": funcs.JoinFunc, + "jsondecode": stdlib.JSONDecodeFunc, + "jsonencode": stdlib.JSONEncodeFunc, + "keys": funcs.KeysFunc, + "length": funcs.LengthFunc, + "list": funcs.ListFunc, + "log": funcs.LogFunc, + "lookup": funcs.LookupFunc, + "lower": stdlib.LowerFunc, + "map": funcs.MapFunc, + "matchkeys": funcs.MatchkeysFunc, + "max": stdlib.MaxFunc, + "md5": funcs.Md5Func, + "merge": funcs.MergeFunc, + "min": stdlib.MinFunc, + "parseint": funcs.ParseIntFunc, + "pathexpand": 
funcs.PathExpandFunc, + "pow": funcs.PowFunc, + "range": stdlib.RangeFunc, + "regex": stdlib.RegexFunc, + "regexall": stdlib.RegexAllFunc, + "replace": funcs.ReplaceFunc, + "reverse": funcs.ReverseFunc, + "rsadecrypt": funcs.RsaDecryptFunc, + "setintersection": stdlib.SetIntersectionFunc, + "setproduct": funcs.SetProductFunc, + "setunion": stdlib.SetUnionFunc, + "sha1": funcs.Sha1Func, + "sha256": funcs.Sha256Func, + "sha512": funcs.Sha512Func, + "signum": funcs.SignumFunc, + "slice": funcs.SliceFunc, + "sort": funcs.SortFunc, + "split": funcs.SplitFunc, + "strrev": stdlib.ReverseFunc, + "substr": stdlib.SubstrFunc, + "timestamp": funcs.TimestampFunc, + "timeadd": funcs.TimeAddFunc, + "title": funcs.TitleFunc, + "tostring": funcs.MakeToFunc(cty.String), + "tonumber": funcs.MakeToFunc(cty.Number), + "tobool": funcs.MakeToFunc(cty.Bool), + "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)), + "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)), + "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)), + "transpose": funcs.TransposeFunc, + "trimspace": funcs.TrimSpaceFunc, + "upper": stdlib.UpperFunc, + "urlencode": funcs.URLEncodeFunc, + "uuid": funcs.UUIDFunc, + "uuidv5": funcs.UUIDV5Func, + "values": funcs.ValuesFunc, + "yamldecode": ctyyaml.YAMLDecodeFunc, + "yamlencode": ctyyaml.YAMLEncodeFunc, + "zipmap": funcs.ZipmapFunc, + } + + s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function { + // The templatefile function prevents recursive calls to itself + // by copying this map and overwriting the "templatefile" entry. + return s.funcs + }) + + if s.PureOnly { + // Force our few impure functions to return unknown so that we + // can defer evaluating them until a later pass. + for _, name := range impureFunctions { + s.funcs[name] = function.Unpredictable(s.funcs[name]) + } + } + } + s.funcsLock.Unlock() + + return s.funcs +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go new file mode 100644 index 000000000..7923d5113 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/references.go @@ -0,0 +1,81 @@ +package lang + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang/blocktoattr" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// References finds all of the references in the given set of traversals, +// returning diagnostics if any of the traversals cannot be interpreted as a +// reference. +// +// This function does not do any de-duplication of references, since references +// have source location information embedded in them and so any invalid +// references that are duplicated should have errors reported for each +// occurence. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. Otherwise, the returned slice has one reference per +// given traversal, though it is not guaranteed that the references will +// appear in the same order as the given traversals. 
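The function table assembled by Functions above is meant to be handed to an hcl.EvalContext when evaluating expressions. A sketch of how that might look (illustrative only; the lang package is internal to the SDK, and real callers also populate Data and SelfAddr on the Scope):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclsyntax"

	"github.com/hashicorp/terraform-plugin-sdk/internal/lang"
)

func main() {
	scope := &lang.Scope{BaseDir: "."}

	// Parse a standalone expression that uses two functions from the table.
	expr, diags := hclsyntax.ParseExpression([]byte(`upper(join("-", ["a", "b"]))`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}

	val, diags := expr.Value(&hcl.EvalContext{Functions: scope.Functions()})
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(val.AsString()) // A-B
}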
+func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'terraform' package, since it's + // in a better position to test this due to having mock providers etc + // available. + traversals := blocktoattr.ExpandedVariables(body, schema) + return References(traversals) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + return References(traversals) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go new file mode 100644 index 000000000..a720cca68 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/lang/scope.go @@ -0,0 +1,34 @@ +package lang + +import ( + "sync" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. This is + // important during a plan phase to avoid generating results that could + // then differ during apply. 
+ PureOnly bool + + funcs map[string]function.Function + funcsLock sync.Mutex +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go new file mode 100644 index 000000000..0d7d664fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/doc.go @@ -0,0 +1,3 @@ +// Package modsdir is an internal package containing the model types used to +// represent the manifest of modules in a local modules cache directory. +package modsdir diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go new file mode 100644 index 000000000..2d45c8520 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/manifest.go @@ -0,0 +1,138 @@ +package modsdir + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Record represents some metadata about an installed module, as part +// of a ModuleManifest. +type Record struct { + // Key is a unique identifier for this particular module, based on its + // position within the static module tree. + Key string `json:"Key"` + + // SourceAddr is the source address given for this module in configuration. + // This is used only to detect if the source was changed in configuration + // since the module was last installed, which means that the installer + // must re-install it. + SourceAddr string `json:"Source"` + + // Version is the exact version of the module, which results from parsing + // VersionStr. nil for un-versioned modules. + Version *version.Version `json:"-"` + + // VersionStr is the version specifier string. This is used only for + // serialization in snapshots and should not be accessed or updated + // by any other codepaths; use "Version" instead. + VersionStr string `json:"Version,omitempty"` + + // Dir is the path to the local directory where the module is installed. + Dir string `json:"Dir"` +} + +// Manifest is a map used to keep track of the filesystem locations +// and other metadata about installed modules. +// +// The configuration loader refers to this, while the module installer updates +// it to reflect any changes to the installed modules. +type Manifest map[string]Record + +func (m Manifest) ModuleKey(path addrs.Module) string { + return path.String() +} + +// manifestSnapshotFile is an internal struct used only to assist in our JSON +// serialization of manifest snapshots. It should not be used for any other +// purpose. +type manifestSnapshotFile struct { + Records []Record `json:"Modules"` +} + +func ReadManifestSnapshot(r io.Reader) (Manifest, error) { + src, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + if len(src) == 0 { + // This should never happen, but we'll tolerate it as if it were + // a valid empty JSON object. 
+ return make(Manifest), nil + } + + var read manifestSnapshotFile + err = json.Unmarshal(src, &read) + + new := make(Manifest) + for _, record := range read.Records { + if record.VersionStr != "" { + record.Version, err = version.NewVersion(record.VersionStr) + if err != nil { + return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err) + } + } + if _, exists := new[record.Key]; exists { + // This should never happen in any valid file, so we'll catch it + // and report it to avoid confusing/undefined behavior if the + // snapshot file was edited incorrectly outside of Terraform. + return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) + } + new[record.Key] = record + } + return new, nil +} + +func ReadManifestSnapshotForDir(dir string) (Manifest, error) { + fn := filepath.Join(dir, ManifestSnapshotFilename) + r, err := os.Open(fn) + if err != nil { + if os.IsNotExist(err) { + return make(Manifest), nil // missing file is okay and treated as empty + } + return nil, err + } + return ReadManifestSnapshot(r) +} + +func (m Manifest) WriteSnapshot(w io.Writer) error { + var write manifestSnapshotFile + + for _, record := range m { + // Make sure VersionStr is in sync with Version, since we encourage + // callers to manipulate Version and ignore VersionStr. + if record.Version != nil { + record.VersionStr = record.Version.String() + } else { + record.VersionStr = "" + } + write.Records = append(write.Records, record) + } + + src, err := json.Marshal(write) + if err != nil { + return err + } + + _, err = w.Write(src) + return err +} + +func (m Manifest) WriteSnapshotToDir(dir string) error { + fn := filepath.Join(dir, ManifestSnapshotFilename) + log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) + w, err := os.Create(fn) + if err != nil { + return err + } + return m.WriteSnapshot(w) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go new file mode 100644 index 000000000..9ebb52431 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/modsdir/paths.go @@ -0,0 +1,3 @@ +package modsdir + +const ManifestSnapshotFilename = "modules.json" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go new file mode 100644 index 000000000..c80588718 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/dependencies.go @@ -0,0 +1,43 @@ +package moduledeps + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Providers describes a set of provider dependencies for a given module. +// +// Each named provider instance can have one version constraint. +type Providers map[ProviderInstance]ProviderDependency + +// ProviderDependency describes the dependency for a particular provider +// instance, including both the set of allowed versions and the reason for +// the dependency. +type ProviderDependency struct { + Constraints discovery.Constraints + Reason ProviderDependencyReason +} + +// ProviderDependencyReason is an enumeration of reasons why a dependency might be +// present. +type ProviderDependencyReason int + +const ( + // ProviderDependencyExplicit means that there is an explicit "provider" + // block in the configuration for this module. 
+ ProviderDependencyExplicit ProviderDependencyReason = iota + + // ProviderDependencyImplicit means that there is no explicit "provider" + // block but there is at least one resource that uses this provider. + ProviderDependencyImplicit + + // ProviderDependencyInherited is a special case of + // ProviderDependencyImplicit where a parent module has defined a + // configuration for the provider that has been inherited by at least one + // resource in this module. + ProviderDependencyInherited + + // ProviderDependencyFromState means that this provider is not currently + // referenced by configuration at all, but some existing instances in + // the state still depend on it. + ProviderDependencyFromState +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go new file mode 100644 index 000000000..7eff08315 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/doc.go @@ -0,0 +1,7 @@ +// Package moduledeps contains types that can be used to describe the +// providers required for all of the modules in a module tree. +// +// It does not itself contain the functionality for populating such +// data structures; that's in Terraform core, since this package intentionally +// does not depend on terraform core to avoid package dependency cycles. +package moduledeps diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go new file mode 100644 index 000000000..5189acfc1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/module.go @@ -0,0 +1,134 @@ +package moduledeps + +import ( + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Module represents the dependencies of a single module, as well being +// a node in a tree of such structures representing the dependencies of +// an entire configuration. +type Module struct { + Name string + Providers Providers + Children []*Module +} + +// WalkFunc is a callback type for use with Module.WalkTree +type WalkFunc func(path []string, parent *Module, current *Module) error + +// WalkTree calls the given callback once for the receiver and then +// once for each descendent, in an order such that parents are called +// before their children and siblings are called in the order they +// appear in the Children slice. +// +// When calling the callback, parent will be nil for the first call +// for the receiving module, and then set to the direct parent of +// each module for the subsequent calls. +// +// The path given to the callback is valid only until the callback +// returns, after which it will be mutated and reused. Callbacks must +// therefore copy the path slice if they wish to retain it. +// +// If the given callback returns an error, the walk will be aborted at +// that point and that error returned to the caller. +// +// This function is not thread-safe for concurrent modifications of the +// data structure, so it's the caller's responsibility to arrange for that +// should it be needed. +// +// It is safe for a callback to modify the descendents of the "current" +// module, including the ordering of the Children slice itself, but the +// callback MUST NOT modify the parent module. 
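
A small sketch of how WalkTree and SortChildren behave, written in the style of an example test as if it lived inside the moduledeps package (the package is internal, so this is illustrative rather than something external code could compile against; the module names are made up).

package moduledeps

import (
    "fmt"
    "strings"
)

// ExampleModule_WalkTree is an illustrative sketch, not part of the vendored
// sources: it walks a hand-built tree and prints each module's path.
func ExampleModule_WalkTree() {
    root := &Module{
        Name: "root",
        Children: []*Module{
            {Name: "network"},
            {Name: "compute", Children: []*Module{{Name: "disk"}}},
        },
    }
    root.SortChildren() // visit siblings in lexicographic order

    _ = root.WalkTree(func(path []string, parent, current *Module) error {
        // path is reused between calls, so it must be copied if retained;
        // here it is only read during the callback, which is safe.
        fmt.Println(strings.Join(path, "."))
        return nil
    })
    // Output:
    // root
    // root.compute
    // root.compute.disk
    // root.network
}
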
+func (m *Module) WalkTree(cb WalkFunc) error { + return walkModuleTree(make([]string, 0, 1), nil, m, cb) +} + +func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error { + path = append(path, current.Name) + err := cb(path, parent, current) + if err != nil { + return err + } + + for _, child := range current.Children { + err := walkModuleTree(path, current, child, cb) + if err != nil { + return err + } + } + return nil +} + +// SortChildren sorts the Children slice into lexicographic order by +// name, in-place. +// +// This is primarily useful prior to calling WalkTree so that the walk +// will proceed in a consistent order. +func (m *Module) SortChildren() { + sort.Sort(sortModules{m.Children}) +} + +type sortModules struct { + modules []*Module +} + +func (s sortModules) Len() int { + return len(s.modules) +} + +func (s sortModules) Less(i, j int) bool { + cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) + return cmp < 0 +} + +func (s sortModules) Swap(i, j int) { + s.modules[i], s.modules[j] = s.modules[j], s.modules[i] +} + +// PluginRequirements produces a PluginRequirements structure that can +// be used with discovery.PluginMetaSet.ConstrainVersions to identify +// suitable plugins to satisfy the module's provider dependencies. +// +// This method only considers the direct requirements of the receiver. +// Use AllPluginRequirements to flatten the dependencies for the +// entire tree of modules. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) PluginRequirements() discovery.PluginRequirements { + ret := make(discovery.PluginRequirements) + for inst, dep := range m.Providers { + // m.Providers is keyed on provider names, such as "aws.foo". + // a PluginRequirements wants keys to be provider *types*, such + // as "aws". If there are multiple aliases for the same + // provider then we will flatten them into a single requirement + // by combining their constraint sets. + pty := inst.Type() + if existing, exists := ret[pty]; exists { + ret[pty].Versions = existing.Versions.Append(dep.Constraints) + } else { + ret[pty] = &discovery.PluginConstraints{ + Versions: dep.Constraints, + } + } + } + return ret +} + +// AllPluginRequirements calls PluginRequirements for the receiver and all +// of its descendents, and merges the result into a single PluginRequirements +// structure that would satisfy all of the modules together. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) AllPluginRequirements() discovery.PluginRequirements { + var ret discovery.PluginRequirements + m.WalkTree(func(path []string, parent *Module, current *Module) error { + ret = ret.Merge(current.PluginRequirements()) + return nil + }) + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go new file mode 100644 index 000000000..89ceefb2c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps/provider.go @@ -0,0 +1,30 @@ +package moduledeps + +import ( + "strings" +) + +// ProviderInstance describes a particular provider instance by its full name, +// like "null" or "aws.foo". +type ProviderInstance string + +// Type returns the provider type of this instance. 
For example, for an instance +// named "aws.foo" the type is "aws". +func (p ProviderInstance) Type() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + t = t[:dotPos] + } + return t +} + +// Alias returns the alias of this provider, if any. An instance named "aws.foo" +// has the alias "foo", while an instance named just "docker" has no alias, +// so the empty string would be returned. +func (p ProviderInstance) Alias() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + return t[dotPos+1:] + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go new file mode 100644 index 000000000..c653b106b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action.go @@ -0,0 +1,22 @@ +package plans + +type Action rune + +const ( + NoOp Action = 0 + Create Action = '+' + Read Action = '←' + Update Action = '~' + DeleteThenCreate Action = '∓' + CreateThenDelete Action = '±' + Delete Action = '-' +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type Action + +// IsReplace returns true if the action is one of the two actions that +// represents replacing an existing object with a new object: +// DeleteThenCreate or CreateThenDelete. +func (a Action) IsReplace() bool { + return a == DeleteThenCreate || a == CreateThenDelete +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go new file mode 100644 index 000000000..be43ab175 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/action_string.go @@ -0,0 +1,49 @@ +// Code generated by "stringer -type Action"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoOp-0] + _ = x[Create-43] + _ = x[Read-8592] + _ = x[Update-126] + _ = x[DeleteThenCreate-8723] + _ = x[CreateThenDelete-177] + _ = x[Delete-45] +} + +const ( + _Action_name_0 = "NoOp" + _Action_name_1 = "Create" + _Action_name_2 = "Delete" + _Action_name_3 = "Update" + _Action_name_4 = "CreateThenDelete" + _Action_name_5 = "Read" + _Action_name_6 = "DeleteThenCreate" +) + +func (i Action) String() string { + switch { + case i == 0: + return _Action_name_0 + case i == 43: + return _Action_name_1 + case i == 45: + return _Action_name_2 + case i == 126: + return _Action_name_3 + case i == 177: + return _Action_name_4 + case i == 8592: + return _Action_name_5 + case i == 8723: + return _Action_name_6 + default: + return "Action(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go new file mode 100644 index 000000000..5c2028c83 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes.go @@ -0,0 +1,308 @@ +package plans + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/zclconf/go-cty/cty" +) + +// Changes describes various actions that Terraform will attempt to take if +// the corresponding plan is applied. 
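
The rune-valued Action constants and their generated String method above can be summarized with a short example-test-style sketch (illustrative only, written as if it lived inside the plans package): only the two replace actions report IsReplace as true.

package plans

import "fmt"

// ExampleAction_IsReplace is an illustrative sketch, not part of the vendored
// sources.
func ExampleAction_IsReplace() {
    for _, a := range []Action{Create, Read, Update, DeleteThenCreate, CreateThenDelete, Delete} {
        fmt.Printf("%s: %v\n", a, a.IsReplace())
    }
    // Output:
    // Create: false
    // Read: false
    // Update: false
    // DeleteThenCreate: true
    // CreateThenDelete: true
    // Delete: false
}
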
+// +// A Changes object can be rendered into a visual diff (by the caller, using +// code in another package) for display to the user. +type Changes struct { + // Resources tracks planned changes to resource instance objects. + Resources []*ResourceInstanceChangeSrc + + // Outputs tracks planned changes output values. + // + // Note that although an in-memory plan contains planned changes for + // outputs throughout the configuration, a plan serialized + // to disk retains only the root outputs because they are + // externally-visible, while other outputs are implementation details and + // can be easily re-calculated during the apply phase. Therefore only root + // module outputs will survive a round-trip through a plan file. + Outputs []*OutputChangeSrc +} + +// NewChanges returns a valid Changes object that describes no changes. +func NewChanges() *Changes { + return &Changes{} +} + +func (c *Changes) Empty() bool { + for _, res := range c.Resources { + if res.Action != NoOp { + return false + } + } + return true +} + +// ResourceInstance returns the planned change for the current object of the +// resource instance of the given address, if any. Returns nil if no change is +// planned. +func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { + addrStr := addr.String() + for _, rc := range c.Resources { + if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { + return rc + } + } + + return nil +} + +// ResourceInstanceDeposed returns the plan change of a deposed object of +// the resource instance of the given address, if any. Returns nil if no change +// is planned. +func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { + addrStr := addr.String() + for _, rc := range c.Resources { + if rc.Addr.String() == addrStr && rc.DeposedKey == key { + return rc + } + } + + return nil +} + +// OutputValue returns the planned change for the output value with the +// given address, if any. Returns nil if no change is planned. +func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { + addrStr := addr.String() + for _, oc := range c.Outputs { + if oc.Addr.String() == addrStr { + return oc + } + } + + return nil +} + +// SyncWrapper returns a wrapper object around the receiver that can be used +// to make certain changes to the receiver in a concurrency-safe way, as long +// as all callers share the same wrapper object. +func (c *Changes) SyncWrapper() *ChangesSync { + return &ChangesSync{ + changes: c, + } +} + +// ResourceInstanceChange describes a change to a particular resource instance +// object. +type ResourceInstanceChange struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. + Addr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. 
+ ProviderAddr addrs.AbsProviderConfig + + // Change is an embedded description of the change. + Change + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. Pass the implied type of the +// corresponding resource type schema for correct operation. +func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { + cs, err := rc.Change.Encode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChangeSrc{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + ProviderAddr: rc.ProviderAddr, + ChangeSrc: *cs, + RequiredReplace: rc.RequiredReplace, + Private: rc.Private, + }, err +} + +// Simplify will, where possible, produce a change with a simpler action than +// the receiever given a flag indicating whether the caller is dealing with +// a normal apply or a destroy. This flag deals with the fact that Terraform +// Core uses a specialized graph node type for destroying; only that +// specialized node should set "destroying" to true. +// +// The following table shows the simplification behavior: +// +// Action Destroying? New Action +// --------+-------------+----------- +// Create true NoOp +// Delete false NoOp +// Replace true Delete +// Replace false Create +// +// For any combination not in the above table, the Simplify just returns the +// receiver as-is. +func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { + if destroying { + switch rc.Action { + case Delete: + // We'll fall out and just return rc verbatim, then. + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Delete, + Before: rc.Before, + After: cty.NullVal(rc.Before.Type()), + }, + } + default: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + }, + } + } + } else { + switch rc.Action { + case Delete: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + }, + } + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Create, + Before: cty.NullVal(rc.After.Type()), + After: rc.After, + }, + } + } + } + + // If we fall out here then our change is already simple enough. + return rc +} + +// OutputChange describes a change to an output value. +type OutputChange struct { + // Addr is the absolute address of the output value that the change + // will apply to. 
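
The Simplify table above is easier to read next to a concrete case. The sketch below is illustrative only (written as if inside the plans package, with arbitrary before/after values and zero-value addresses): a DeleteThenCreate replace collapses to Delete on the destroy path and to Create on the normal apply path.

package plans

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
)

// ExampleResourceInstanceChange_Simplify is an illustrative sketch, not part
// of the vendored sources.
func ExampleResourceInstanceChange_Simplify() {
    rc := &ResourceInstanceChange{
        Change: Change{
            Action: DeleteThenCreate,
            Before: cty.StringVal("old"),
            After:  cty.StringVal("new"),
        },
    }

    // During a destroy, a replace simplifies to just its Delete half...
    fmt.Println(rc.Simplify(true).Action)
    // ...and during a normal apply it simplifies to just its Create half.
    fmt.Println(rc.Simplify(false).Action)
    // Output:
    // Delete
    // Create
}
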
+ Addr addrs.AbsOutputValue + + // Change is an embedded description of the change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + Change + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. + Sensitive bool +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. +func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { + cs, err := oc.Change.Encode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChangeSrc{ + Addr: oc.Addr, + ChangeSrc: *cs, + Sensitive: oc.Sensitive, + }, err +} + +// Change describes a single change with a given action. +type Change struct { + // Action defines what kind of change is being made. + Action Action + + // Interpretation of Before and After depend on Action: + // + // NoOp Before and After are the same, unchanged value + // Create Before is nil, and After is the expected value after create. + // Read Before is any prior value (nil if no prior), and After is the + // value that was or will be read. + // Update Before is the value prior to update, and After is the expected + // value after update. + // Replace As with Update. + // Delete Before is the value prior to delete, and After is always nil. + // + // Unknown values may appear anywhere within the Before and After values, + // either as the values themselves or as nested elements within known + // collections/structures. + Before, After cty.Value +} + +// Encode produces a variant of the reciever that has its change values +// serialized so it can be written to a plan file. Pass the type constraint +// that the values are expected to conform to; to properly decode the values +// later an identical type constraint must be provided at that time. +// +// Where a Change is embedded in some other struct, it's generally better +// to call the corresponding Encode method of that struct rather than working +// directly with its embedded Change. +func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { + beforeDV, err := NewDynamicValue(c.Before, ty) + if err != nil { + return nil, err + } + afterDV, err := NewDynamicValue(c.After, ty) + if err != nil { + return nil, err + } + + return &ChangeSrc{ + Action: c.Action, + Before: beforeDV, + After: afterDV, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go new file mode 100644 index 000000000..97bc8da7c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_src.go @@ -0,0 +1,190 @@ +package plans + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/zclconf/go-cty/cty" +) + +// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. +// Pass the associated resource type's schema type to method Decode to +// obtain a ResourceInstancChange. +type ResourceInstanceChangeSrc struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. 
+ Addr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // ChangeSrc is an embedded description of the not-yet-decoded change. + ChangeSrc + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Decode unmarshals the raw representation of the instance object being +// changed. Pass the implied type of the corresponding resource type schema +// for correct operation. +func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { + change, err := rcs.ChangeSrc.Decode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChange{ + Addr: rcs.Addr, + DeposedKey: rcs.DeposedKey, + ProviderAddr: rcs.ProviderAddr, + Change: *change, + RequiredReplace: rcs.RequiredReplace, + Private: rcs.Private, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { + if rcs == nil { + return nil + } + ret := *rcs + + ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) + + if len(ret.Private) != 0 { + private := make([]byte, len(ret.Private)) + copy(private, ret.Private) + ret.Private = private + } + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// OutputChangeSrc describes a change to an output value. +type OutputChangeSrc struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // ChangeSrc is an embedded description of the not-yet-decoded change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + ChangeSrc + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. 
+ Sensitive bool +} + +// Decode unmarshals the raw representation of the output value being +// changed. +func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { + change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChange{ + Addr: ocs.Addr, + Change: *change, + Sensitive: ocs.Sensitive, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { + if ocs == nil { + return nil + } + ret := *ocs + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// ChangeSrc is a not-yet-decoded Change. +type ChangeSrc struct { + // Action defines what kind of change is being made. + Action Action + + // Before and After correspond to the fields of the same name in Change, + // but have not yet been decoded from the serialized value used for + // storage. + Before, After DynamicValue +} + +// Decode unmarshals the raw representations of the before and after values +// to produce a Change object. Pass the type constraint that the result must +// conform to. +// +// Where a ChangeSrc is embedded in some other struct, it's generally better +// to call the corresponding Decode method of that struct rather than working +// directly with its embedded Change. +func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { + var err error + before := cty.NullVal(ty) + after := cty.NullVal(ty) + + if len(cs.Before) > 0 { + before, err = cs.Before.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'before' value: %s", err) + } + } + if len(cs.After) > 0 { + after, err = cs.After.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'after' value: %s", err) + } + } + return &Change{ + Action: cs.Action, + Before: before, + After: after, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go new file mode 100644 index 000000000..89cc1ab22 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/changes_sync.go @@ -0,0 +1,144 @@ +package plans + +import ( + "fmt" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ChangesSync is a wrapper around a Changes that provides a concurrency-safe +// interface to insert new changes and retrieve copies of existing changes. +// +// Each ChangesSync is independent of all others, so all concurrent writers +// to a particular Changes must share a single ChangesSync. Behavior is +// undefined if any other caller makes changes to the underlying Changes +// object or its nested objects concurrently with any of the methods of a +// particular ChangesSync. +type ChangesSync struct { + lock sync.Mutex + changes *Changes +} + +// AppendResourceInstanceChange records the given resource instance change in +// the set of planned resource changes. 
+// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { + if cs == nil { + panic("AppendResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Resources = append(cs.changes.Resources, s) +} + +// GetResourceInstanceChange searches the set of resource instance changes for +// one matching the given address and generation, returning it if it exists. +// +// If no such change exists, nil is returned. +// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { + if cs == nil { + panic("GetResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + if gen == states.CurrentGen { + return cs.changes.ResourceInstance(addr).DeepCopy() + } + if dk, ok := gen.(states.DeposedKey); ok { + return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() + } + panic(fmt.Sprintf("unsupported generation value %#v", gen)) +} + +// RemoveResourceInstanceChange searches the set of resource instance changes +// for one matching the given address and generation, and removes it from the +// set if it exists. +func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { + if cs == nil { + panic("RemoveResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + dk := states.NotDeposed + if realDK, ok := gen.(states.DeposedKey); ok { + dk = realDK + } + + addrStr := addr.String() + for i, r := range cs.changes.Resources { + if r.Addr.String() != addrStr || r.DeposedKey != dk { + continue + } + copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) + cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] + return + } +} + +// AppendOutputChange records the given output value change in the set of +// planned value changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { + if cs == nil { + panic("AppendOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Outputs = append(cs.changes.Outputs, s) +} + +// GetOutputChange searches the set of output value changes for one matching +// the given address, returning it if it exists. +// +// If no such change exists, nil is returned. +// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. 
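
As a sketch of the concurrency contract described above: all writers share a single ChangesSync obtained from SyncWrapper, and each append stores a deep copy of the given change. The example-test-style code below is illustrative only (written as if inside the plans package) and appends placeholder zero-value output changes purely to exercise the locking behaviour.

package plans

import (
    "fmt"
    "sync"
)

// ExampleChanges_SyncWrapper is an illustrative sketch, not part of the
// vendored sources: several goroutines append through one shared ChangesSync.
func ExampleChanges_SyncWrapper() {
    changes := NewChanges()
    cs := changes.SyncWrapper()

    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // A placeholder change with zero-value fields, used here only to
            // exercise the append path; AppendOutputChange stores a deep copy.
            cs.AppendOutputChange(&OutputChangeSrc{})
        }()
    }
    wg.Wait()

    fmt.Println(len(changes.Outputs))
    // Output:
    // 10
}
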
+func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { + if cs == nil { + panic("GetOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + return cs.changes.OutputValue(addr) +} + +// RemoveOutputChange searches the set of output value changes for one matching +// the given address, and removes it from the set if it exists. +func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { + if cs == nil { + panic("RemoveOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + addrStr := addr.String() + for i, o := range cs.changes.Outputs { + if o.Addr.String() != addrStr { + continue + } + copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) + cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] + return + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go new file mode 100644 index 000000000..01ca38923 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/doc.go @@ -0,0 +1,5 @@ +// Package plans contains the types that are used to represent Terraform plans. +// +// A plan describes a set of changes that Terraform will make to update remote +// objects to match with changes to the configuration. +package plans diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go new file mode 100644 index 000000000..51fbb24cf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/dynamic_value.go @@ -0,0 +1,96 @@ +package plans + +import ( + "github.com/zclconf/go-cty/cty" + ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" +) + +// DynamicValue is the representation in the plan of a value whose type cannot +// be determined at compile time, such as because it comes from a schema +// defined in a plugin. +// +// This type is used as an indirection so that the overall plan structure can +// be decoded without schema available, and then the dynamic values accessed +// at a later time once the appropriate schema has been determined. +// +// Internally, DynamicValue is a serialized version of a cty.Value created +// against a particular type constraint. Callers should not access directly +// the serialized form, whose format may change in future. Values of this +// type must always be created by calling NewDynamicValue. +// +// The zero value of DynamicValue is nil, and represents the absense of a +// value within the Go type system. This is distinct from a cty.NullVal +// result, which represents the absense of a value within the cty type system. +type DynamicValue []byte + +// NewDynamicValue creates a DynamicValue by serializing the given value +// against the given type constraint. The value must conform to the type +// constraint, or the result is undefined. +// +// If the value to be encoded has no predefined schema (for example, for +// module output values and input variables), set the type constraint to +// cty.DynamicPseudoType in order to save type information as part of the +// value, and then also pass cty.DynamicPseudoType to method Decode to recover +// the original value. +// +// cty.NilVal can be used to represent the absense of a value, but callers +// must be careful to distinguish values that are absent at the Go layer +// (cty.NilVal) vs. 
values that are absent at the cty layer (cty.NullVal +// results). +func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) { + // If we're given cty.NilVal (the zero value of cty.Value, which is + // distinct from a typed null value created by cty.NullVal) then we'll + // assume the caller is trying to represent the _absense_ of a value, + // and so we'll return a nil DynamicValue. + if val == cty.NilVal { + return DynamicValue(nil), nil + } + + // Currently our internal encoding is msgpack, via ctymsgpack. + buf, err := ctymsgpack.Marshal(val, ty) + if err != nil { + return nil, err + } + + return DynamicValue(buf), nil +} + +// Decode retrieves the effective value from the receiever by interpreting the +// serialized form against the given type constraint. For correct results, +// the type constraint must match (or be consistent with) the one that was +// used to create the receiver. +// +// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and +// instead represents the absense of a value. +func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) { + if v == nil { + return cty.NilVal, nil + } + + return ctymsgpack.Unmarshal([]byte(v), ty) +} + +// ImpliedType returns the type implied by the serialized structure of the +// receiving value. +// +// This will not necessarily be exactly the type that was given when the +// value was encoded, and in particular must not be used for values that +// were encoded with their static type given as cty.DynamicPseudoType. +// It is however safe to use this method for values that were encoded using +// their runtime type as the conforming type, with the result being +// semantically equivalent but with all lists and sets represented as tuples, +// and maps as objects, due to ambiguities of the serialization. +func (v DynamicValue) ImpliedType() (cty.Type, error) { + return ctymsgpack.ImpliedType([]byte(v)) +} + +// Copy produces a copy of the receiver with a distinct backing array. +func (v DynamicValue) Copy() DynamicValue { + if v == nil { + return nil + } + + ret := make(DynamicValue, len(v)) + copy(ret, v) + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go new file mode 100644 index 000000000..ba9cc9611 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/all_null.go @@ -0,0 +1,18 @@ +package objchange + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// AllAttributesNull constructs a non-null cty.Value of the object type implied +// by the given schema that has all of its leaf attributes set to null and all +// of its nested block collections set to zero-length. +// +// This simulates what would result from decoding an empty configuration block +// with the given schema, except that it does not produce errors +func AllAttributesNull(schema *configschema.Block) cty.Value { + // "All attributes null" happens to be the definition of EmptyValue for + // a Block, so we can just delegate to that. 
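
A short round trip through the DynamicValue helpers above may help clarify the cty.NilVal versus cty.NullVal distinction. The example-test-style sketch below is illustrative only, written as if it lived inside the plans package; the object value is made up.

package plans

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
)

// ExampleNewDynamicValue is an illustrative sketch, not part of the vendored
// sources: a round trip through the msgpack-based DynamicValue encoding.
func ExampleNewDynamicValue() {
    val := cty.ObjectVal(map[string]cty.Value{
        "name":    cty.StringVal("example"),
        "enabled": cty.True,
    })

    // cty.DynamicPseudoType stores the value's own type alongside it, so the
    // same constraint can be used to decode it again later.
    dv, err := NewDynamicValue(val, cty.DynamicPseudoType)
    if err != nil {
        panic(err)
    }

    got, err := dv.Decode(cty.DynamicPseudoType)
    if err != nil {
        panic(err)
    }
    fmt.Println(got.RawEquals(val))

    // cty.NilVal (absence at the Go layer) encodes to a nil DynamicValue,
    // which decodes back to cty.NilVal rather than to a typed null.
    absent, _ := NewDynamicValue(cty.NilVal, cty.DynamicPseudoType)
    fmt.Println(absent == nil)
    // Output:
    // true
    // true
}
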
+ return schema.EmptyValue() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go new file mode 100644 index 000000000..36a7d496c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/compatible.go @@ -0,0 +1,447 @@ +package objchange + +import ( + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// AssertObjectCompatible checks whether the given "actual" value is a valid +// completion of the possibly-partially-unknown "planned" value. +// +// This means that any known leaf value in "planned" must be equal to the +// corresponding value in "actual", and various other similar constraints. +// +// Any inconsistencies are reported by returning a non-zero number of errors. +// These errors are usually (but not necessarily) cty.PathError values +// referring to a particular nested value within the "actual" value. +// +// The two values must have types that conform to the given schema's implied +// type, or this function will panic. +func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { + return assertObjectCompatible(schema, planned, actual, nil) +} + +func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { + var errs []error + if planned.IsNull() && !actual.IsNull() { + errs = append(errs, path.NewErrorf("was absent, but now present")) + return errs + } + if actual.IsNull() && !planned.IsNull() { + errs = append(errs, path.NewErrorf("was present, but now absent")) + return errs + } + if planned.IsNull() { + // No further checks possible if both values are null + return errs + } + + for name, attrS := range schema.Attributes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + path := append(path, cty.GetAttrStep{Name: name}) + moreErrs := assertValueCompatible(plannedV, actualV, path) + if attrS.Sensitive { + if len(moreErrs) > 0 { + // Use a vague placeholder message instead, to avoid disclosing + // sensitive information. + errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) + } + } else { + errs = append(errs, moreErrs...) + } + } + for name, blockS := range schema.BlockTypes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + // As a special case, if there were any blocks whose leaf attributes + // are all unknown then we assume (possibly incorrectly) that the + // HCL dynamic block extension is in use with an unknown for_each + // argument, and so we will do looser validation here that allows + // for those blocks to have expanded into a different number of blocks + // if the for_each value is now known. + maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) + + path := append(path, cty.GetAttrStep{Name: name}) + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + // If an unknown block placeholder was present then the placeholder + // may have expanded out into zero blocks, which is okay. + if maybeUnknownBlocks && actualV.IsNull() { + continue + } + moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) + errs = append(errs, moreErrs...) 
+ case configschema.NestingList: + // A NestingList might either be a list or a tuple, depending on + // whether there are dynamically-typed attributes inside. However, + // both support a similar-enough API that we can treat them the + // same for our purposes here. + if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + if maybeUnknownBlocks { + // When unknown blocks are present the final blocks may be + // at different indices than the planned blocks, so unfortunately + // we can't do our usual checks in this case without generating + // false negatives. + continue + } + + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL { + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) + } + case configschema.NestingMap: + // A NestingMap might either be a map or an object, depending on + // whether there are dynamically-typed attributes inside, but + // that's decided statically and so both values will have the same + // kind. + if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + actualAtys := actualV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := actualAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q has vanished", k)) + continue + } + + plannedEV := plannedV.GetAttr(k) + actualEV := actualV.GetAttr(k) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k})) + errs = append(errs, moreErrs...) + } + if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan + for k := range actualAtys { + if _, ok := plannedAtys[k]; !ok { + errs = append(errs, path.NewErrorf("new block key %q has appeared", k)) + continue + } + } + } + } else { + if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were persent in the plan + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) + } + } + case configschema.NestingSet: + if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool { + errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV})) + return len(errs) == 0 + }) + errs = append(errs, setErrs...) + + if maybeUnknownBlocks { + // When unknown blocks are present the final number of blocks + // may be different, either because the unknown set values + // become equal and are collapsed, or the count is unknown due + // a dynamic block. 
Unfortunately this means we can't do our + // usual checks in this case without generating false + // negatives. + continue + } + + // There can be fewer elements in a set after its elements are all + // known (values that turn out to be equal will coalesce) but the + // number of elements must never get larger. + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL < actualL { + errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL)) + } + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + return errs +} + +func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error { + // NOTE: We don't normally use the GoString rendering of cty.Value in + // user-facing error messages as a rule, but we make an exception + // for this function because we expect the user to pass this message on + // verbatim to the provider development team and so more detail is better. + + var errs []error + if planned.Type() == cty.DynamicPseudoType { + // Anything goes, then + return errs + } + if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 { + errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type()))) + // If the types don't match then we can't do any other comparisons, + // so we bail early. + return errs + } + + if !planned.IsKnown() { + // We didn't know what were going to end up with during plan, so + // anything goes during apply. + return errs + } + + if actual.IsNull() { + if planned.IsNull() { + return nil + } + errs = append(errs, path.NewErrorf("was %#v, but now null", planned)) + return errs + } + if planned.IsNull() { + errs = append(errs, path.NewErrorf("was null, but now %#v", actual)) + return errs + } + + ty := planned.Type() + switch { + + case !actual.IsKnown(): + errs = append(errs, path.NewErrorf("was known, but now unknown")) + + case ty.IsPrimitiveType(): + if !actual.Equals(planned).True() { + errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual)) + } + + case ty.IsListType() || ty.IsMapType() || ty.IsTupleType(): + for it := planned.ElementIterator(); it.Next(); { + k, plannedV := it.Element() + if !actual.HasIndex(k).True() { + errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k))) + continue + } + + actualV := actual.Index(k) + moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k})) + errs = append(errs, moreErrs...) + } + + for it := actual.ElementIterator(); it.Next(); { + k, _ := it.Element() + if !planned.HasIndex(k).True() { + errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k))) + } + } + + case ty.IsObjectType(): + atys := ty.AttributeTypes() + for name := range atys { + // Because we already tested that the two values have the same type, + // we can assume that the same attributes are present in both and + // focus just on testing their values. + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name})) + errs = append(errs, moreErrs...) + } + + case ty.IsSetType(): + // We can't really do anything useful for sets here because changing + // an unknown element to known changes the identity of the element, and + // so we can't correlate them properly. 
However, we will at least check + // to ensure that the number of elements is consistent, along with + // the general type-match checks we ran earlier in this function. + if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() { + + setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool { + errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV})) + return len(errs) == 0 + }) + errs = append(errs, setErrs...) + + // There can be fewer elements in a set after its elements are all + // known (values that turn out to be equal will coalesce) but the + // number of elements must never get larger. + + plannedL := planned.LengthInt() + actualL := actual.LengthInt() + if plannedL < actualL { + errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL)) + } + } + } + + return errs +} + +func indexStrForErrors(v cty.Value) string { + switch v.Type() { + case cty.Number: + return v.AsBigFloat().Text('f', -1) + case cty.String: + return strconv.Quote(v.AsString()) + default: + // Should be impossible, since no other index types are allowed! + return fmt.Sprintf("%#v", v) + } +} + +// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the +// HCL dynamic block extension behaves when it's asked to expand a block whose +// for_each argument is unknown. In such cases, it generates a single placeholder +// block with all leaf attribute values unknown, and once the for_each +// expression becomes known the placeholder may be replaced with any number +// of blocks, so object compatibility checks would need to be more liberal. +// +// Set "nested" if testing a block that is nested inside a candidate block +// placeholder; this changes the interpretation of there being no blocks of +// a type to allow for there being zero nested blocks. +func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if nested && v.IsNull() { + return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder + } + return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block) + default: + // These situations should be impossible for correct providers, but + // we permit the legacy SDK to produce some incorrect outcomes + // for compatibility with its existing logic, and so we must be + // tolerant here. + if !v.IsKnown() { + return true + } + if v.IsNull() { + return false // treated as if the list were empty, so we would see zero iterations below + } + + // For all other nesting modes, our value should be something iterable. + for it := v.ElementIterator(); it.Next(); { + _, ev := it.Element() + if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) { + return true + } + } + + // Our default changes depending on whether we're testing the candidate + // block itself or something nested inside of it: zero blocks of a type + // can never contain a dynamic block placeholder, but a dynamic block + // placeholder might contain zero blocks of one of its own nested block + // types, if none were set in the config at all. 
+ return nested + } +} + +func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool { + if v.IsNull() { + return false // null value can never be a placeholder element + } + if !v.IsKnown() { + return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs + } + for name := range schema.Attributes { + av := v.GetAttr(name) + + // Unknown block placeholders contain only unknown or null attribute + // values, depending on whether or not a particular attribute was set + // explicitly inside the content block. Note that this is imprecise: + // non-placeholders can also match this, so this function can generate + // false positives. + if av.IsKnown() && !av.IsNull() { + return false + } + } + for name, blockS := range schema.BlockTypes { + if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) { + return false + } + } + return true +} + +// assertSetValuesCompatible checks that each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa, +// using the given correlation function. +// +// This allows the number of elements in the sets to change as long as all +// elements in both sets can be correlated, making this function safe to use +// with sets that may contain unknown values as long as the unknown case is +// addressed in some reasonable way in the callback function. +// +// The callback always recieves values from set a as its first argument and +// values from set b in its second argument, so it is safe to use with +// non-commutative functions. +// +// As with assertValueCompatible, we assume that the target audience of error +// messages here is a provider developer (via a bug report from a user) and so +// we intentionally violate our usual rule of keeping cty implementation +// details out of error messages. +func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error { + a := planned + b := actual + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. + // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. + as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if f(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + var errs []error + for i, eq := range aeqs { + if !eq { + errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i])) + } + } + if len(errs) > 0 { + // Exit early since otherwise we're likely to generate duplicate + // error messages from the other perspective in the subsequent loop. 
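
Tying the compatibility rules above together, here is an illustrative sketch (the schema, attribute names, and values are made up, and configschema is an internal package, so this is written as if it sat alongside the objchange package's own tests): a leaf value that was known at plan time must not change, while an unknown planned value may be completed with anything.

package objchange

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"

    "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
)

// demoAssertObjectCompatible is an illustrative sketch, not part of the
// vendored sources.
func demoAssertObjectCompatible() {
    schema := &configschema.Block{
        Attributes: map[string]*configschema.Attribute{
            "ami": {Type: cty.String, Optional: true},
            "id":  {Type: cty.String, Computed: true},
        },
    }

    planned := cty.ObjectVal(map[string]cty.Value{
        "ami": cty.StringVal("ami-123"),
        "id":  cty.UnknownVal(cty.String), // unknown at plan time, so anything goes at apply
    })
    actual := cty.ObjectVal(map[string]cty.Value{
        "ami": cty.StringVal("ami-456"), // differs from the known planned value
        "id":  cty.StringVal("i-abc123"),
    })

    // Reports a single inconsistency for the "ami" attribute, roughly:
    //   was cty.StringVal("ami-123"), but now cty.StringVal("ami-456")
    for _, err := range AssertObjectCompatible(schema, planned, actual) {
        fmt.Println(err)
    }
}
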
+ return errs + } + for i, eq := range beqs { + if !eq { + errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i])) + } + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go new file mode 100644 index 000000000..2c18a0108 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/doc.go @@ -0,0 +1,4 @@ +// Package objchange deals with the business logic of taking a prior state +// value and a config value and producing a proposed new merged value, along +// with other related rules in this domain. +package objchange diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go new file mode 100644 index 000000000..cbfefdddd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/lcs.go @@ -0,0 +1,104 @@ +package objchange + +import ( + "github.com/zclconf/go-cty/cty" +) + +// LongestCommonSubsequence finds a sequence of values that are common to both +// x and y, with the same relative ordering as in both collections. This result +// is useful as a first step towards computing a diff showing added/removed +// elements in a sequence. +// +// The approached used here is a "naive" one, assuming that both xs and ys will +// generally be small in most reasonable Terraform configurations. For larger +// lists the time/space usage may be sub-optimal. +// +// A pair of lists may have multiple longest common subsequences. In that +// case, the one selected by this function is undefined. +func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value { + if len(xs) == 0 || len(ys) == 0 { + return make([]cty.Value, 0) + } + + c := make([]int, len(xs)*len(ys)) + eqs := make([]bool, len(xs)*len(ys)) + w := len(xs) + + for y := 0; y < len(ys); y++ { + for x := 0; x < len(xs); x++ { + eqV := xs[x].Equals(ys[y]) + eq := false + if eqV.IsKnown() && eqV.True() { + eq = true + eqs[(w*y)+x] = true // equality tests can be expensive, so cache it + } + if eq { + // Sequence gets one longer than for the cell at top left, + // since we'd append a new item to the sequence here. + if x == 0 || y == 0 { + c[(w*y)+x] = 1 + } else { + c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 + } + } else { + // We follow the longest of the sequence above and the sequence + // to the left of us in the matrix. + l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + c[(w*y)+x] = l + } else { + c[(w*y)+x] = u + } + } + } + } + + // The bottom right cell tells us how long our longest sequence will be + seq := make([]cty.Value, c[len(c)-1]) + + // Now we will walk back from the bottom right cell, finding again all + // of the equal pairs to construct our sequence. + x := len(xs) - 1 + y := len(ys) - 1 + i := len(seq) - 1 + + for x > -1 && y > -1 { + if eqs[(w*y)+x] { + // Add the value to our result list and then walk diagonally + // up and to the left. + seq[i] = xs[x] + x-- + y-- + i-- + } else { + // Take the path with the greatest sequence length in the matrix. 
+ l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + x-- + } else { + y-- + } + } + } + + if i > -1 { + // should never happen if the matrix was constructed properly + panic("not enough elements in sequence") + } + + return seq +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go new file mode 100644 index 000000000..a8629046c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/normalize_obj.go @@ -0,0 +1,132 @@ +package objchange + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// NormalizeObjectFromLegacySDK takes an object that may have been generated +// by the legacy Terraform SDK (i.e. returned from a provider with the +// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// assumptions we would normally enforce if the provider had not opted out. +// +// In particular, this function guarantees that a value representing a nested +// block will never itself be unknown or null, instead representing that as +// a non-null value that may contain null/unknown values. +// +// The input value must still conform to the implied type of the given schema, +// or else this function may produce garbage results or panic. This is usually +// okay because type consistency is enforced when deserializing the value +// returned from the provider over the RPC wire protocol anyway. +func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + if val == cty.NilVal || val.IsNull() { + // This should never happen in reasonable use, but we'll allow it + // and normalize to a null of the expected type rather than panicking + // below. + return cty.NullVal(schema.ImpliedType()) + } + + vals := make(map[string]cty.Value) + for name := range schema.Attributes { + // No normalization for attributes, since them being type-conformant + // is all that we require. + vals[name] = val.GetAttr(name) + } + for name, blockS := range schema.BlockTypes { + lv := val.GetAttr(name) + + // Legacy SDK never generates dynamically-typed attributes and so our + // normalization code doesn't deal with them, but we need to make sure + // we still pass them through properly so that we don't interfere with + // objects generated by other SDKs. 
+ if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() { + vals[name] = lv + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if lv.IsKnown() { + if lv.IsNull() && blockS.Nesting == configschema.NestingGroup { + vals[name] = blockS.EmptyValue() + } else { + vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block) + } + } else { + vals[name] = unknownBlockStub(&blockS.Block) + } + case configschema.NestingList: + switch { + case !lv.IsKnown(): + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.ListVal(subVals) + } + case configschema.NestingSet: + switch { + case !lv.IsKnown(): + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.SetVal(subVals) + } + default: + // The legacy SDK doesn't support NestingMap, so we just assume + // maps are always okay. (If not, we would've detected and returned + // an error to the user before we got here.) + vals[name] = lv + } + } + return cty.ObjectVal(vals) +} + +// unknownBlockStub constructs an object value that approximates an unknown +// block by producing a known block object with all of its leaf attribute +// values set to unknown. +// +// Blocks themselves cannot be unknown, so if the legacy SDK tries to return +// such a thing, we'll use this result instead. This convention mimics how +// the dynamic block feature deals with being asked to iterate over an unknown +// value, because our value-checking functions already accept this convention +// as a special case. +func unknownBlockStub(schema *configschema.Block) cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range schema.Attributes { + vals[name] = cty.UnknownVal(attrS.Type) + } + for name, blockS := range schema.BlockTypes { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + vals[name] = unknownBlockStub(&blockS.Block) + case configschema.NestingList: + // In principle we may be expected to produce a tuple value here, + // if there are any dynamically-typed attributes in our nested block, + // but the legacy SDK doesn't support that, so we just assume it'll + // never be necessary to normalize those. (Incorrect usage in any + // other SDK would be caught and returned as an error before we + // get here.) + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingSet: + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingMap: + // A nesting map can never be unknown since we then wouldn't know + // what the keys are. (Legacy SDK doesn't support NestingMap anyway, + // so this should never arise.) 
+			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
+		}
+	}
+	return cty.ObjectVal(vals)
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
new file mode 100644
index 000000000..879fc93a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/objchange.go
@@ -0,0 +1,390 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema"
+)
+
+// ProposedNewObject constructs a proposed new object value by combining the
+// computed attribute values from "prior" with the configured attribute values
+// from "config".
+//
+// Both values must conform to the given schema's implied type, or this function
+// will panic.
+//
+// The prior value must be wholly known, but the config value may be unknown
+// or have nested unknown values.
+//
+// The merging of the two objects includes the attributes of any nested blocks,
+// which will be correlated in a manner appropriate for their nesting mode.
+// Note in particular that the correlation for blocks backed by sets is a
+// heuristic based on matching non-computed attribute values and so it may
+// produce strange results with more "extreme" cases, such as a nested set
+// block where _all_ attributes are computed.
+func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	// If the config and prior are both null, return early here before
+	// populating the prior block. This prevents non-null blocks from appearing
+	// in the proposed state value.
+	if config.IsNull() && prior.IsNull() {
+		return prior
+	}
+
+	if prior.IsNull() {
+		// In this case, we will construct a synthetic prior value that is
+		// similar to the result of decoding an empty configuration block,
+		// which simplifies our handling of the top-level attributes/blocks
+		// below by giving us one non-null level of object to pull values from.
+		prior = AllAttributesNull(schema)
+	}
+	return proposedNewObject(schema, prior, config)
+}
+
+// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
+// planning data resources in particular. Specifically, it replaces the values
+// of any Computed attributes not set in the configuration with an unknown
+// value, which serves as a placeholder for a value to be filled in by the
+// provider when the data resource is finally read.
+//
+// Data resources are different because the planning of them is handled
+// entirely within Terraform Core and not subject to customization by the
+// provider. This function is, in effect, producing an equivalent result to
+// passing the ProposedNewObject result into a provider's PlanResourceChange
+// function, assuming a fixed implementation of PlanResourceChange that just
+// fills in unknown values as needed.
+func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
+	// Our trick here is to run the ProposedNewObject logic with an
+	// entirely-unknown prior value. Because of cty's unknown short-circuit
+	// behavior, any operation on prior returns another unknown, and so
+	// unknown values propagate into all of the parts of the resulting value
+	// that would normally be filled in by preserving the prior state.
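The "entirely-unknown prior" trick relies on cty's unknown short-circuiting. A minimal illustration of that behavior, using a hypothetical object type with a single id attribute, shows why anything that would have been preserved from the prior state comes out unknown instead:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A hypothetical object type standing in for schema.ImpliedType().
	ty := cty.Object(map[string]cty.Type{"id": cty.String})

	// An entirely-unknown "prior" value.
	prior := cty.UnknownVal(ty)

	// Operations against an unknown value short-circuit to further unknowns,
	// so any part of the result derived from prior remains unknown.
	fmt.Println(prior.IsKnown())               // false
	fmt.Println(prior.GetAttr("id").IsKnown()) // false
}
```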
+ prior := cty.UnknownVal(schema.ImpliedType()) + return proposedNewObject(schema, prior, config) +} + +func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { + if config.IsNull() || !config.IsKnown() { + // This is a weird situation, but we'll allow it anyway to free + // callers from needing to specifically check for these cases. + return prior + } + if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { + panic("ProposedNewObject only supports object-typed values") + } + + // From this point onwards, we can assume that both values are non-null + // object types, and that the config value itself is known (though it + // may contain nested values that are unknown.) + + newAttrs := map[string]cty.Value{} + for name, attr := range schema.Attributes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch { + case attr.Computed && attr.Optional: + // This is the trickiest scenario: we want to keep the prior value + // if the config isn't overriding it. Note that due to some + // ambiguity here, setting an optional+computed attribute from + // config and then later switching the config to null in a + // subsequent change causes the initial config value to be "sticky" + // unless the provider specifically overrides it during its own + // plan customization step. + if configV.IsNull() { + newV = priorV + } else { + newV = configV + } + case attr.Computed: + // configV will always be null in this case, by definition. + // priorV may also be null, but that's okay. + newV = priorV + default: + // For non-computed attributes, we always take the config value, + // even if it is null. If it's _required_ then null values + // should've been caught during an earlier validation step, and + // so we don't really care about that here. + newV = configV + } + newAttrs[name] = newV + } + + // Merging nested blocks is a little more complex, since we need to + // correlate blocks between both objects and then recursively propose + // a new object for each. The correlation logic depends on the nesting + // mode for each block type. + for name, blockType := range schema.BlockTypes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch blockType.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + newV = ProposedNewObject(&blockType.Block, priorV, configV) + + case configschema.NestingList: + // Nested blocks are correlated by index. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals = append(newVals, configEV) + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. + if configV.Type().IsTupleType() { + newV = cty.TupleVal(newVals) + } else { + newV = cty.ListVal(newVals) + } + } else { + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. 
+ if configV.Type().IsTupleType() { + newV = cty.EmptyTupleVal + } else { + newV = cty.ListValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingMap: + // Despite the name, a NestingMap may produce either a map or + // object value, depending on whether the nested schema contains + // dynamically-typed attributes. + if configV.Type().IsObjectType() { + // Nested blocks are correlated by key. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + atys := configV.Type().AttributeTypes() + for name := range atys { + configEV := configV.GetAttr(name) + if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals[name] = configEV + continue + } + priorEV := priorV.GetAttr(name) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[name] = newEV + } + // Although we call the nesting mode "map", we actually use + // object values so that elements might have different types + // in case of dynamically-typed attributes. + newV = cty.ObjectVal(newVals) + } else { + newV = cty.EmptyObjectVal + } + } else { + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + k := idx.AsString() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals[k] = configEV + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[k] = newEV + } + newV = cty.MapVal(newVals) + } else { + newV = cty.MapValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingSet: + if !configV.Type().IsSetType() { + panic("configschema.NestingSet value is not a set as expected") + } + + // Nested blocks are correlated by comparing the element values + // after eliminating all of the computed attributes. In practice, + // this means that any config change produces an entirely new + // nested object, and we only propagate prior computed values + // if the non-computed attribute values are identical. 
+ var cmpVals [][2]cty.Value + if priorV.IsKnown() && !priorV.IsNull() { + cmpVals = setElementCompareValues(&blockType.Block, priorV, false) + } + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + _, configEV := it.Element() + var priorEV cty.Value + for i, cmp := range cmpVals { + if used[i] { + continue + } + if cmp[1].RawEquals(configEV) { + priorEV = cmp[0] + used[i] = true // we can't use this value on a future iteration + break + } + } + if priorEV == cty.NilVal { + priorEV = cty.NullVal(blockType.ImpliedType()) + } + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + newV = cty.SetVal(newVals) + } else { + newV = cty.SetValEmpty(blockType.Block.ImpliedType()) + } + + default: + // Should never happen, since the above cases are comprehensive. + panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) + } + + newAttrs[name] = newV + } + + return cty.ObjectVal(newAttrs) +} + +// setElementCompareValues takes a known, non-null value of a cty.Set type and +// returns a table -- constructed of two-element arrays -- that maps original +// set element values to corresponding values that have all of the computed +// values removed, making them suitable for comparison with values obtained +// from configuration. The element type of the set must conform to the implied +// type of the given schema, or this function will panic. +// +// In the resulting slice, the zeroth element of each array is the original +// value and the one-indexed element is the corresponding "compare value". +// +// This is intended to help correlate prior elements with configured elements +// in ProposedNewObject. The result is a heuristic rather than an exact science, +// since e.g. two separate elements may reduce to the same value through this +// process. The caller must therefore be ready to deal with duplicates. +func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { + ret := make([][2]cty.Value, 0, set.LengthInt()) + for it := set.ElementIterator(); it.Next(); { + _, ev := it.Element() + ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) + } + return ret +} + +// setElementCompareValue creates a new value that has all of the same +// non-computed attribute values as the one given but has all computed +// attribute values forced to null. +// +// If isConfig is true then non-null Optional+Computed attribute values will +// be preserved. Otherwise, they will also be set to null. +// +// The input value must conform to the schema's implied type, and the return +// value is guaranteed to conform to it. 
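As a concrete illustration of the "compare value" idea documented above, the sketch below uses a hypothetical set element type with one configurable attribute (name) and one computed attribute (id): once the computed attribute is nulled out, the prior element and its configured counterpart compare equal and can be correlated.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// compareValue nulls out the hypothetical computed attribute "id" so that a
// prior set element becomes directly comparable with its configured
// counterpart, which never carries a value for "id".
func compareValue(v cty.Value) cty.Value {
	return cty.ObjectVal(map[string]cty.Value{
		"name": v.GetAttr("name"),       // configurable: kept as-is
		"id":   cty.NullVal(cty.String), // computed: ignored for correlation
	})
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"id":   cty.StringVal("i-12345"),
	})
	config := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"id":   cty.NullVal(cty.String),
	})

	// Equal compare values mean these two elements correlate, so the prior
	// computed "id" can be carried forward into the proposed new element.
	fmt.Println(compareValue(prior).RawEquals(config)) // true
}
```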
+func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value { + if v.IsNull() || !v.IsKnown() { + return v + } + + attrs := map[string]cty.Value{} + for name, attr := range schema.Attributes { + switch { + case attr.Computed && attr.Optional: + if isConfig { + attrs[name] = v.GetAttr(name) + } else { + attrs[name] = cty.NullVal(attr.Type) + } + case attr.Computed: + attrs[name] = cty.NullVal(attr.Type) + default: + attrs[name] = v.GetAttr(name) + } + } + + for name, blockType := range schema.BlockTypes { + switch blockType.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig) + + case configschema.NestingList, configschema.NestingSet: + cv := v.GetAttr(name) + if cv.IsNull() || !cv.IsKnown() { + attrs[name] = cv + continue + } + if l := cv.LengthInt(); l > 0 { + elems := make([]cty.Value, 0, l) + for it := cv.ElementIterator(); it.Next(); { + _, ev := it.Element() + elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig)) + } + if blockType.Nesting == configschema.NestingSet { + // SetValEmpty would panic if given elements that are not + // all of the same type, but that's guaranteed not to + // happen here because our input value was _already_ a + // set and we've not changed the types of any elements here. + attrs[name] = cty.SetVal(elems) + } else { + attrs[name] = cty.TupleVal(elems) + } + } else { + if blockType.Nesting == configschema.NestingSet { + attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType()) + } else { + attrs[name] = cty.EmptyTupleVal + } + } + + case configschema.NestingMap: + cv := v.GetAttr(name) + if cv.IsNull() || !cv.IsKnown() { + attrs[name] = cv + continue + } + elems := make(map[string]cty.Value) + for it := cv.ElementIterator(); it.Next(); { + kv, ev := it.Element() + elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig) + } + attrs[name] = cty.ObjectVal(elems) + + default: + // Should never happen, since the above cases are comprehensive. + panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) + } + } + + return cty.ObjectVal(attrs) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go new file mode 100644 index 000000000..905a91142 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange/plan_valid.go @@ -0,0 +1,267 @@ +package objchange + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// AssertPlanValid checks checks whether a planned new state returned by a +// provider's PlanResourceChange method is suitable to achieve a change +// from priorState to config. It returns a slice with nonzero length if +// any problems are detected. Because problems here indicate bugs in the +// provider that generated the plannedState, they are written with provider +// developers as an audience, rather than end-users. +// +// All of the given values must have the same type and must conform to the +// implied type of the given schema, or this function may panic or produce +// garbage results. 
+//
+// During planning, a provider may only make changes to attributes that are
+// null (unset) in the configuration and are marked as "computed" in the
+// resource type schema, in order to insert any default values the provider
+// may know about. If the default value cannot be determined until apply time,
+// the provider can return an unknown value. Providers are forbidden from
+// planning a change that disagrees with any non-null argument in the
+// configuration.
+//
+// As a special exception, providers _are_ allowed to provide attribute values
+// conflicting with configuration if and only if the planned value exactly
+// matches the corresponding attribute value in the prior state. The provider
+// can use this to signal that the new value is functionally equivalent to
+// the old and thus no change is required.
+func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
+	return assertPlanValid(schema, priorState, config, plannedState, nil)
+}
+
+func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedState.IsNull() && !config.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
+		return errs
+	}
+	if config.IsNull() && !plannedState.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
+		return errs
+	}
+	if plannedState.IsNull() {
+		// No further checks possible if the planned value is null
+		return errs
+	}
+
+	impTy := schema.ImpliedType()
+
+	for name, attrS := range schema.Attributes {
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(attrS.Type)
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+
+		path := append(path, cty.GetAttrStep{Name: name})
+		moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
+		errs = append(errs, moreErrs...)
+	}
+	for name, blockS := range schema.BlockTypes {
+		path := append(path, cty.GetAttrStep{Name: name})
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(impTy.AttributeType(name))
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+		if plannedV.RawEquals(configV) {
+			// Easy path: nothing has changed at all
+			continue
+		}
+		if !plannedV.IsKnown() {
+			errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+			continue
+		}
+
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
+			errs = append(errs, moreErrs...)
+		case configschema.NestingList:
+			// A NestingList might either be a list or a tuple, depending on
+			// whether there are dynamically-typed attributes inside. However,
+			// both support a similar-enough API that we can treat them the
+			// same for our purposes here.
+ if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + if !configV.HasIndex(idx).True() { + continue // should never happen since we checked the lengths above + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + case configschema.NestingMap: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // A NestingMap might either be a map or an object, depending on + // whether there are dynamically-typed attributes inside, but + // that's decided statically and so all values will have the same + // kind. + if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + configAtys := configV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := configAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + path := append(path, cty.GetAttrStep{Name: k}) + + plannedEV := plannedV.GetAttr(k) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + configEV := configV.GetAttr(k) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.Type().HasAttribute(k) { + priorEV = priorV.GetAttr(k) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) 
+ } + for k := range configAtys { + if _, ok := plannedAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) + continue + } + } + } else { + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + k := idx.AsString() + if !configV.HasIndex(idx).True() { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + for it := configV.ElementIterator(); it.Next(); { + idx, _ := it.Element() + if !plannedV.HasIndex(idx).True() { + errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString())) + continue + } + } + } + case configschema.NestingSet: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // Because set elements have no identifier with which to correlate + // them, we can't robustly validate the plan for a nested block + // backed by a set, and so unfortunately we need to just trust the + // provider to do the right thing. :( + // + // (In principle we could correlate elements by matching the + // subset of attributes explicitly set in config, except for the + // special diff suppression rule which allows for there to be a + // planned value that is constructed by mixing part of a prior + // value with part of a config value, creating an entirely new + // element that is not present in either prior nor config.) + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + } + + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + + return errs +} + +func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error { + var errs []error + if plannedV.RawEquals(configV) { + // This is the easy path: provider didn't change anything at all. + return errs + } + if plannedV.RawEquals(priorV) && !priorV.IsNull() { + // Also pretty easy: there is a prior value and the provider has + // returned it unchanged. This indicates that configV and plannedV + // are functionally equivalent and so the provider wishes to disregard + // the configuration value in favor of the prior. + return errs + } + if attrS.Computed && configV.IsNull() { + // The provider is allowed to change the value of any computed + // attribute that isn't explicitly set in the config. 
+ return errs + } + + // If none of the above conditions match, the provider has made an invalid + // change to this attribute. + if priorV.IsNull() { + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) + } + return errs + } + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go new file mode 100644 index 000000000..0abed56a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plans/plan.go @@ -0,0 +1,92 @@ +package plans + +import ( + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// Plan is the top-level type representing a planned set of changes. +// +// A plan is a summary of the set of changes required to move from a current +// state to a goal state derived from configuration. The described changes +// are not applied directly, but contain an approximation of the final +// result that will be completed during apply by resolving any values that +// cannot be predicted. +// +// A plan must always be accompanied by the state and configuration it was +// built from, since the plan does not itself include all of the information +// required to make the changes indicated. +type Plan struct { + VariableValues map[string]DynamicValue + Changes *Changes + TargetAddrs []addrs.Targetable + ProviderSHA256s map[string][]byte + Backend Backend +} + +// Backend represents the backend-related configuration and other data as it +// existed when a plan was created. +type Backend struct { + // Type is the type of backend that the plan will apply against. + Type string + + // Config is the configuration of the backend, whose schema is decided by + // the backend Type. + Config DynamicValue + + // Workspace is the name of the workspace that was active when the plan + // was created. It is illegal to apply a plan created for one workspace + // to the state of another workspace. + // (This constraint is already enforced by the statefile lineage mechanism, + // but storing this explicitly allows us to return a better error message + // in the situation where the user has the wrong workspace selected.) + Workspace string +} + +func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { + dv, err := NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + return nil, err + } + + return &Backend{ + Type: typeName, + Config: dv, + Workspace: workspaceName, + }, nil +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving plan. +// +// The result is de-duplicated so that each distinct address appears only once. 
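ProviderAddrs (below) uses the usual map-plus-sort idiom to both de-duplicate addresses and stabilize their ordering. A minimal stand-alone sketch of that idiom, with made-up provider address strings, looks like this:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Made-up provider addresses as they might repeat across resource changes.
	addrs := []string{"provider.aws", "provider.google", "provider.aws"}

	seen := map[string]struct{}{}
	for _, a := range addrs {
		seen[a] = struct{}{} // de-duplicate via map keys
	}

	uniq := make([]string, 0, len(seen))
	for a := range seen {
		uniq = append(uniq, a)
	}
	sort.Strings(uniq) // sort so the result is stable, e.g. for tests

	fmt.Println(uniq) // [provider.aws provider.google]
}
```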
+func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig { + if p == nil || p.Changes == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, rc := range p.Changes.Resources { + m[rc.ProviderAddr.String()] = rc.ProviderAddr + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go new file mode 100644 index 000000000..f20f0507e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/diagnostics.go @@ -0,0 +1,132 @@ +package convert + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrorsToProto converts the warnings and errors return by the legacy +// provider to protobuf diagnostics. +func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { + for _, w := range warns { + diags = AppendProtoDiag(diags, w) + } + + for _, e := range errs { + diags = AppendProtoDiag(diags, e) + } + + return diags +} + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. +func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + Attribute: ap, + }) + case error: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + }) + case string: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: d, + }) + case *proto.Diagnostic: + diags = append(diags, d) + case []*proto.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics. 
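AppendProtoDiag (above) leans on a type switch to map plain warning strings and Go errors onto diagnostic severities. The sketch below reproduces just that dispatch with a stand-in struct instead of the real protobuf Diagnostic message:

```go
package main

import (
	"errors"
	"fmt"
)

// diag stands in for the protobuf Diagnostic message used by the real code.
type diag struct {
	Severity string
	Summary  string
}

// appendDiag mirrors the dispatch in AppendProtoDiag above: plain strings
// become warnings, Go errors become errors.
func appendDiag(diags []diag, d interface{}) []diag {
	switch d := d.(type) {
	case error:
		diags = append(diags, diag{Severity: "ERROR", Summary: d.Error()})
	case string:
		diags = append(diags, diag{Severity: "WARNING", Summary: d})
	}
	return diags
}

func main() {
	var diags []diag
	diags = appendDiag(diags, "this argument is deprecated")
	diags = appendDiag(diags, errors.New("invalid CIDR block"))
	fmt.Printf("%+v\n", diags)
	// [{Severity:WARNING Summary:this argument is deprecated} {Severity:ERROR Summary:invalid CIDR block}]
}
```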
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, d := range ds { + var severity tfdiags.Severity + + switch d.Severity { + case proto.Diagnostic_ERROR: + severity = tfdiags.Error + case proto.Diagnostic_WARNING: + severity = tfdiags.Warning + } + + var newDiag tfdiags.Diagnostic + + // if there's an attribute path, we need to create a AttributeValue diagnostic + if d.Attribute != nil { + path := AttributePathToPath(d.Attribute) + newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path) + } else { + newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail) + } + + diags = diags.Append(newDiag) + } + + return diags +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *proto.AttributePath) cty.Path { + var p cty.Path + for _, step := range ap.Steps { + switch selector := step.Selector.(type) { + case *proto.AttributePath_Step_AttributeName: + p = p.GetAttr(selector.AttributeName) + case *proto.AttributePath_Step_ElementKeyString: + p = p.Index(cty.StringVal(selector.ElementKeyString)) + case *proto.AttributePath_Step_ElementKeyInt: + p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) + } + } + return p +} + +// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path. +func PathToAttributePath(p cty.Path) *proto.AttributePath { + ap := &proto.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: selector.Name, + }, + }) + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: key.AsString(), + }, + }) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: v, + }, + }) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go new file mode 100644 index 000000000..105c32c6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert/schema.go @@ -0,0 +1,154 @@ +package convert + +import ( + "encoding/json" + "reflect" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. 
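The two path converters above walk cty.Path steps and proto AttributePath steps in parallel. Here is a small, self-contained example of how such a path is built and inspected with the cty API; the attribute names and index are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A path like network_interface[0].subnet_id expressed as cty path steps;
	// each step corresponds to one AttributePath_Step in the converters above.
	var p cty.Path
	p = p.GetAttr("network_interface")
	p = p.Index(cty.NumberIntVal(0))
	p = p.GetAttr("subnet_id")

	for _, step := range p {
		switch s := step.(type) {
		case cty.GetAttrStep:
			fmt.Println("attribute:", s.Name)
		case cty.IndexStep:
			fmt.Println("index:", s.Key.GoString())
		}
	}
}
```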
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{} + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + } + + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + + attr.Type = ty + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema. +func ProtoToProviderSchema(s *proto.Schema) providers.Schema { + return providers.Schema{ + Version: s.Version, + Block: ProtoToConfigSchema(s.Block), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a terraform *configschema.Block. +func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + } + + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: + nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. 
This panics if map keys
+// are not a string.
+func sortedKeys(m interface{}) []string {
+	v := reflect.ValueOf(m)
+	keys := make([]string, v.Len())
+
+	mapKeys := v.MapKeys()
+	for i, k := range mapKeys {
+		keys[i] = k.Interface().(string)
+	}
+
+	sort.Strings(keys)
+	return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
new file mode 100644
index 000000000..729e97099
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/error.go
@@ -0,0 +1,64 @@
+package discovery
+
+// Error is a type used to describe situations that the caller must handle
+// since they indicate some form of user error.
+//
+// The functions and methods that return these specialized errors indicate so
+// in their documentation. The Error type should not itself be used directly,
+// but rather errors should be compared using the == operator with the
+// error constants in this package.
+//
+// Values of this type are _not_ used when the error being reported is an
+// operational error (server unavailable, etc) or indicative of a bug in
+// this package or its caller.
+type Error string
+
+// ErrorNoSuitableVersion indicates that a suitable version (meeting given
+// constraints) is not available.
+const ErrorNoSuitableVersion = Error("no suitable version is available")
+
+// ErrorNoVersionCompatible indicates that all of the available versions
+// that otherwise met constraints are not compatible with the current
+// version of Terraform.
+const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
+
+// ErrorVersionIncompatible indicates that all of the versions within the
+// constraints are not compatible with the current version of Terraform, though
+// there does exist a version outside of the constraints that is compatible.
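The Error type above is a classic string-based sentinel: the constants are comparable, so callers can branch on the specific condition with ==. Below is a minimal sketch of the same pattern, with a hypothetical findVersion function standing in for the discovery code:

```go
package main

import (
	"errors"
	"fmt"
)

// Error mirrors the string-based sentinel pattern above.
type Error string

func (e Error) Error() string { return string(e) }

const ErrorNoSuitableVersion = Error("no suitable version is available")

// findVersion is a hypothetical caller-facing function that reports the
// sentinel when nothing satisfies the requested constraint.
func findVersion(constraint string) error {
	if constraint != "~> 1.0" {
		return ErrorNoSuitableVersion
	}
	return nil
}

func main() {
	err := findVersion("~> 9.9")

	// Sentinels are comparable constants, so callers branch with == (or
	// errors.Is, which falls back to == for comparable targets).
	if err == ErrorNoSuitableVersion {
		fmt.Println("user-actionable:", err)
	}
	fmt.Println(errors.Is(err, ErrorNoSuitableVersion)) // true
}
```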
+const ErrorVersionIncompatible = Error("incompatible provider version") + +// ErrorNoSuchProvider indicates that no provider exists with a name given +const ErrorNoSuchProvider = Error("no provider exists with the given name") + +// ErrorNoVersionCompatibleWithPlatform indicates that all of the available +// versions that otherwise met constraints are not compatible with the +// requested platform +const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform") + +// ErrorMissingChecksumVerification indicates that either the provider +// distribution is missing the SHA256SUMS file or the checksum file does +// not contain a checksum for the binary plugin +const ErrorMissingChecksumVerification = Error("unable to verify checksum") + +// ErrorChecksumVerification indicates that the current checksum of the +// provider plugin has changed since the initial release and is not trusted +// to download +const ErrorChecksumVerification = Error("unexpected plugin checksum") + +// ErrorSignatureVerification indicates that the digital signature for a +// provider distribution could not be verified for one of the following +// reasons: missing signature file, missing public key, or the signature +// was not signed by any known key for the publisher +const ErrorSignatureVerification = Error("unable to verify signature") + +// ErrorServiceUnreachable indicates that the network was unable to connect +// to the registry service +const ErrorServiceUnreachable = Error("registry service is unreachable") + +// ErrorPublicRegistryUnreachable indicates that the network was unable to connect +// to the public registry in particular, so we can show a link to the statuspage +const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates") + +func (err Error) Error() string { + return string(err) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go new file mode 100644 index 000000000..f053312b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/find.go @@ -0,0 +1,191 @@ +package discovery + +import ( + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" +) + +// FindPlugins looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider") and +// returns a PluginMetaSet representing the discovered potential-plugins. +// +// Currently this supports two different naming schemes. The current +// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing +// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is +// files directly in the given directory whose names are like +// terraform-$KIND-$NAME. +// +// Only one plugin will be returned for each unique plugin (name, version) +// pair, with preference given to files found in earlier directories. +// +// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. +func FindPlugins(kind string, dirs []string) PluginMetaSet { + return ResolvePluginPaths(FindPluginPaths(kind, dirs)) +} + +// FindPluginPaths looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider"). 
+// +// The return value is a list of absolute paths that appear to refer to +// plugins in the given directories, based only on what can be inferred +// from the naming scheme. The paths returned are ordered such that files +// in later dirs appear after files in earlier dirs in the given directory +// list. Within the same directory plugins are returned in a consistent but +// undefined order. +func FindPluginPaths(kind string, dirs []string) []string { + // This is just a thin wrapper around findPluginPaths so that we can + // use the latter in tests with a fake machineName so we can use our + // test fixtures. + return findPluginPaths(kind, dirs) +} + +func findPluginPaths(kind string, dirs []string) []string { + prefix := "terraform-" + kind + "-" + + ret := make([]string, 0, len(dirs)) + + for _, dir := range dirs { + items, err := ioutil.ReadDir(dir) + if err != nil { + // Ignore missing dirs, non-dirs, etc + continue + } + + log.Printf("[DEBUG] checking for %s in %q", kind, dir) + + for _, item := range items { + fullName := item.Name() + + if !strings.HasPrefix(fullName, prefix) { + continue + } + + // New-style paths must have a version segment in filename + if strings.Contains(strings.ToLower(fullName), "_v") { + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[DEBUG] found %s %q", kind, fullName) + ret = append(ret, filepath.Clean(absPath)) + continue + } + + // Legacy style with files directly in the base directory + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[WARN] found legacy %s %q", kind, fullName) + + ret = append(ret, filepath.Clean(absPath)) + } + } + + return ret +} + +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + +// ResolvePluginPaths takes a list of paths to plugin executables (as returned +// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the +// referenced plugins. +// +// If the same combination of plugin name and version appears multiple times, +// the earlier reference will be preferred. Several different versions of +// the same plugin name may be returned, in which case the methods of +// PluginMetaSet can be used to filter down. 
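The version extraction that ResolvePluginPaths performs on filenames such as terraform-provider-NAME_vVERSION can be sketched in isolation. The helper below is a simplified approximation: it hard-codes the "provider" kind and uses "0.0.0" as a stand-in for VersionZero.

```go
package main

import (
	"fmt"
	"strings"
)

// parseProviderFilename is a simplified approximation of the extraction done
// by ResolvePluginPaths, restricted to the "provider" plugin kind.
func parseProviderFilename(base string) (name, version string) {
	base = strings.ToLower(base)
	base = strings.TrimSuffix(base, ".exe") // Windows executables
	base = strings.TrimPrefix(base, "terraform-provider-")

	name = base
	version = "0.0.0" // stand-in for VersionZero when no _v segment is present
	if parts := strings.SplitN(base, "_v", 2); len(parts) == 2 {
		name = parts[0]
		version = parts[1]
	}

	// Auto-installed plugins carry a protocol suffix such as "_x4" that is
	// not part of the version itself.
	if i := strings.Index(version, "_x"); i != -1 {
		version = version[:i]
	}
	return name, version
}

func main() {
	fmt.Println(parseProviderFilename("terraform-provider-random_v2.2.1_x4")) // random 2.2.1
	fmt.Println(parseProviderFilename("terraform-provider-local.exe"))        // local 0.0.0
}
```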
+func ResolvePluginPaths(paths []string) PluginMetaSet { + s := make(PluginMetaSet) + + type nameVersion struct { + Name string + Version string + } + found := make(map[nameVersion]struct{}) + + for _, path := range paths { + baseName := strings.ToLower(filepath.Base(path)) + if !strings.HasPrefix(baseName, "terraform-") { + // Should never happen with reasonable input + continue + } + + baseName = baseName[10:] + firstDash := strings.Index(baseName, "-") + if firstDash == -1 { + // Should never happen with reasonable input + continue + } + + baseName = baseName[firstDash+1:] + if baseName == "" { + // Should never happen with reasonable input + continue + } + + // Trim the .exe suffix used on Windows before we start wrangling + // the remainder of the path. + if strings.HasSuffix(baseName, ".exe") { + baseName = baseName[:len(baseName)-4] + } + + parts := strings.SplitN(baseName, "_v", 2) + name := parts[0] + version := VersionZero + if len(parts) == 2 { + version = parts[1] + } + + // Auto-installed plugins contain an extra name portion representing + // the expected plugin version, which we must trim off. + if underX := strings.Index(version, "_x"); underX != -1 { + version = version[:underX] + } + + if _, ok := found[nameVersion{name, version}]; ok { + // Skip duplicate versions of the same plugin + // (We do this during this step because after this we will be + // dealing with sets and thus lose our ordering with which to + // decide preference.) + continue + } + + s.Add(PluginMeta{ + Name: name, + Version: VersionStr(version), + Path: path, + }) + found[nameVersion{name, version}] = struct{}{} + } + + return s +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go new file mode 100644 index 000000000..722bb28a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get.go @@ -0,0 +1,669 @@ +package discovery + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/hashicorp/errwrap" + getter "github.com/hashicorp/go-getter" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" + "github.com/hashicorp/terraform-svchost/disco" + "github.com/mitchellh/cli" +) + +// Releases are located by querying the terraform registry. + +var httpClient *http.Client + +func init() { + httpClient = httpclient.New() + + httpGetter := &getter.HttpGetter{ + Client: httpClient, + Netrc: true, + } + + getter.Getters["http"] = httpGetter + getter.Getters["https"] = httpGetter +} + +// ProviderInstaller is an Installer implementation that knows how to +// download Terraform providers from the official HashiCorp releases service +// into a local directory. The files downloaded are compliant with the +// naming scheme expected by FindPlugins, so the target directory of a +// provider installer can be used as one of several plugin discovery sources. 
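For orientation, the naming scheme the installer must produce (and that FindPlugins expects) can be sketched as follows. The directory layout and provider name here are hypothetical and only illustrate the terraform-provider-NAME_vVERSION convention under a per-platform GOOS/GOARCH directory:

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

func main() {
	// Hypothetical install directory keyed by platform, matching the
	// installer's defaults when OS and Arch are left empty.
	dir := filepath.Join("plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
	name, version := "random", "2.2.1"

	fmt.Println(filepath.Join(dir, fmt.Sprintf("terraform-provider-%s_v%s", name, version)))
	// e.g. plugins/linux_amd64/terraform-provider-random_v2.2.1
}
```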
+type ProviderInstaller struct { + Dir string + + // Cache is used to access and update a local cache of plugins if non-nil. + // Can be nil to disable caching. + Cache PluginCache + + PluginProtocolVersion uint + + // OS and Arch specify the OS and architecture that should be used when + // installing plugins. These use the same labels as the runtime.GOOS and + // runtime.GOARCH variables respectively, and indeed the values of these + // are used as defaults if either of these is the empty string. + OS string + Arch string + + // Skip checksum and signature verification + SkipVerify bool + + Ui cli.Ui // Ui for output + + // Services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + Services *disco.Disco + + // registry client + registry *registry.Client +} + +// Get is part of an implementation of type Installer, and attempts to download +// and install a Terraform provider matching the given constraints. +// +// This method may return one of a number of sentinel errors from this +// package to indicate issues that are likely to be resolvable via user action: +// +// ErrorNoSuchProvider: no provider with the given name exists in the repository. +// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. +// ErrorNoVersionCompatible: a plugin was found within the constraints but it is +// incompatible with the current Terraform version. +// +// These errors should be recognized and handled as special cases by the caller +// to present a suitable user-oriented error message. +// +// All other errors indicate an internal problem that is likely _not_ solvable +// through user action, or at least not within Terraform's scope. Error messages +// are produced under the assumption that if presented to the user they will +// be presented alongside context about what is being installed, and thus the +// error messages do not redundantly include such information. +func (i *ProviderInstaller) Get(provider addrs.ProviderType, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) { + var diags tfdiags.Diagnostics + + // a little bit of initialization. 
+ if i.OS == "" { + i.OS = runtime.GOOS + } + if i.Arch == "" { + i.Arch = runtime.GOARCH + } + if i.registry == nil { + i.registry = registry.NewClient(i.Services, nil) + } + + // get a full listing of versions for the requested provider + allVersions, err := i.listProviderVersions(provider) + + // TODO: return multiple errors + if err != nil { + log.Printf("[DEBUG] %s", err) + if registry.IsServiceUnreachable(err) { + registryHost, err := i.hostname() + if err == nil && registryHost == regsrc.PublicRegistryHost.Raw { + return PluginMeta{}, diags, ErrorPublicRegistryUnreachable + } + return PluginMeta{}, diags, ErrorServiceUnreachable + } + if registry.IsServiceNotProvided(err) { + return PluginMeta{}, diags, err + } + return PluginMeta{}, diags, ErrorNoSuchProvider + } + + // Add any warnings from the response to diags + for _, warning := range allVersions.Warnings { + hostname, err := i.hostname() + if err != nil { + return PluginMeta{}, diags, err + } + diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning)) + diags = diags.Append(diag) + } + + if len(allVersions.Versions) == 0 { + return PluginMeta{}, diags, ErrorNoSuitableVersion + } + providerSource := allVersions.ID + + // Filter the list of plugin versions to those which meet the version constraints + versions := allowedVersions(allVersions, req) + if len(versions) == 0 { + return PluginMeta{}, diags, ErrorNoSuitableVersion + } + + // sort them newest to oldest. The newest version wins! + response.ProviderVersionCollection(versions).Sort() + + // if the chosen provider version does not support the requested platform, + // filter the list of acceptable versions to those that support that platform + if err := i.checkPlatformCompatibility(versions[0]); err != nil { + versions = i.platformCompatibleVersions(versions) + if len(versions) == 0 { + return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform + } + } + + // we now have a winning platform-compatible version + versionMeta := versions[0] + v := VersionStr(versionMeta.Version).MustParse() + + // check protocol compatibility + if err := i.checkPluginProtocol(versionMeta); err != nil { + closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions) + if err != nil { + // No operation here if we can't find a version with compatible protocol + return PluginMeta{}, diags, err + } + + // Prompt version suggestion to UI based on closest protocol match + var errMsg string + closestVersion := VersionStr(closestMatch.Version).MustParse() + if v.NewerThan(closestVersion) { + errMsg = providerProtocolTooNew + } else { + errMsg = providerProtocolTooOld + } + + constraintStr := req.String() + if constraintStr == "" { + constraintStr = "(any version)" + } + + return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf( + errMsg, provider, v.String(), tfversion.String(), + closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr))) + } + + downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version) + if err != nil { + return PluginMeta{}, diags, err + } + providerURL := downloadURLs.DownloadURL + + if !i.SkipVerify { + // Terraform verifies the integrity of a provider release before downloading + // the plugin binary. The digital signature (SHA256SUMS.sig) on the + // release distribution (SHA256SUMS) is verified with the public key of the + // publisher provided in the Terraform Registry response, ensuring that + // everything is as intended by the publisher. 
The checksum of the provider + // plugin is expected in the SHA256SUMS file and is double checked to match + // the checksum of the original published release to the Registry. This + // enforces immutability of releases between the Registry and the plugin's + // host location. Lastly, the integrity of the binary is verified upon + // download matches the Registry and signed checksum. + sha256, err := i.getProviderChecksum(downloadURLs) + if err != nil { + return PluginMeta{}, diags, err + } + + // add the checksum parameter for go-getter to verify the download for us. + if sha256 != "" { + providerURL = providerURL + "?checksum=sha256:" + sha256 + } + } + + printedProviderName := fmt.Sprintf("%q (%s)", provider.Name, providerSource) + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version)) + log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version) + err = i.install(provider, v, providerURL) + if err != nil { + return PluginMeta{}, diags, err + } + + // Find what we just installed + // (This is weird, because go-getter doesn't directly return + // information about what was extracted, and we just extracted + // the archive directly into a shared dir here.) + log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider.Name, versionMeta.Version) + metas := FindPlugins("provider", []string{i.Dir}) + log.Printf("[DEBUG] all plugins found %#v", metas) + metas, _ = metas.ValidateVersions() + metas = metas.WithName(provider.Name).WithVersion(v) + log.Printf("[DEBUG] filtered plugins %#v", metas) + if metas.Count() == 0 { + // This should never happen. Suggests that the release archive + // contains an executable file whose name doesn't match the + // expected convention. + return PluginMeta{}, diags, fmt.Errorf( + "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) + } + + if metas.Count() > 1 { + // This should also never happen, and suggests that a + // particular version was re-released with a different + // executable filename. We consider releases as immutable, so + // this is an error. + return PluginMeta{}, diags, fmt.Errorf( + "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", + versionMeta.Version, + ) + } + + // By now we know we have exactly one meta, and so "Newest" will + // return that one. + return metas.Newest(), diags, nil +} + +func (i *ProviderInstaller) install(provider addrs.ProviderType, version Version, url string) error { + if i.Cache != nil { + log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider.Name, version) + cached := i.Cache.CachedPluginPath("provider", provider.Name, version) + if cached == "" { + log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider.Name, version, url) + err := getter.Get(i.Cache.InstallDir(), url) + if err != nil { + return err + } + // should now be in cache + cached = i.Cache.CachedPluginPath("provider", provider.Name, version) + if cached == "" { + // should never happen if the getter is behaving properly + // and the plugins are packaged properly. + return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) + } + } + + // Link or copy the cached binary into our install dir so the + // normal resolution machinery can find it. 
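+		// Editor's note (not part of the upstream source): the block below first
+		// ensures i.Dir exists (creating it with mode 0700 if missing), then tries
+		// a hard link, then a symlink, and finally falls back to copying the
+		// cached binary byte for byte.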
+ filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + // check if the target dir exists, and create it if not + var err error + if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { + err = os.MkdirAll(i.Dir, 0700) + } + if err != nil { + return err + } + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider.Name, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. + if linkErr != nil { + srcFile, err := os.Open(cached) + if err != nil { + return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) + } + defer srcFile.Close() + + destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create %s: %s", targetPath, err) + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + destFile.Close() + return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) + } + + err = destFile.Close() + if err != nil { + return fmt.Errorf("error creating %s: %s", targetPath, err) + } + } + + // One way or another, by the time we get here we should have either + // a link or a copy of the cached plugin within i.Dir, as expected. + } else { + log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider.Name, version, url) + err := getter.Get(i.Dir, url) + if err != nil { + return err + } + } + return nil +} + +func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { + purge := make(PluginMetaSet) + + present := FindPlugins("provider", []string{i.Dir}) + for meta := range present { + chosen, ok := used[meta.Name] + if !ok { + purge.Add(meta) + } + if chosen.Path != meta.Path { + purge.Add(meta) + } + } + + removed := make(PluginMetaSet) + var errs error + for meta := range purge { + path := meta.Path + err := os.Remove(path) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "failed to remove unused provider plugin %s: %s", + path, err, + )) + } else { + removed.Add(meta) + } + } + + return removed, errs +} + +func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { + // Get SHA256SUMS file. 
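+	// Editor's note (not part of the upstream source): the verification below
+	// proceeds in four steps: fetch SHA256SUMS, fetch SHA256SUMS.sig, verify the
+	// detached signature against both the registry-supplied key and the pinned
+	// HashiCorp key, and finally compare this platform's entry in SHA256SUMS
+	// with the checksum reported by the registry (resp.Shasum).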
+ shasums, err := getFile(resp.ShasumsURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) + return "", ErrorMissingChecksumVerification + } + + // Get SHA256SUMS.sig file. + signature, err := getFile(resp.ShasumsSignatureURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) + return "", ErrorSignatureVerification + } + + // Verify the GPG signature returned from the Registry. + asciiArmor := resp.SigningKeys.GPGASCIIArmor() + signer, err := verifySig(shasums, signature, asciiArmor) + if err != nil { + log.Printf("[ERROR] error verifying signature: %s", err) + return "", ErrorSignatureVerification + } + + // Also verify the GPG signature against the HashiCorp public key. This is + // a temporary additional check until a more robust key verification + // process is added in a future release. + _, err = verifySig(shasums, signature, HashicorpPublicKey) + if err != nil { + log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) + return "", ErrorSignatureVerification + } + + // Display identity for GPG key which succeeded verifying the signature. + // This could also be used to display to the user with i.Ui.Info(). + identities := []string{} + for k := range signer.Identities { + identities = append(identities, k) + } + identity := strings.Join(identities, ", ") + log.Printf("[DEBUG] verified GPG signature with key from %s", identity) + + // Extract checksum for this os/arch platform binary and verify against Registry + checksum := checksumForFile(shasums, resp.Filename) + if checksum == "" { + log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) + return "", ErrorMissingChecksumVerification + } else if checksum != resp.Shasum { + log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) + return "", ErrorChecksumVerification + } + + return checksum, nil +} + +func (i *ProviderInstaller) hostname() (string, error) { + provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) + svchost, err := provider.SvcHost() + if err != nil { + return "", err + } + + return svchost.ForDisplay(), nil +} + +// list all versions available for the named provider +func (i *ProviderInstaller) listProviderVersions(provider addrs.ProviderType) (*response.TerraformProviderVersions, error) { + req := regsrc.NewTerraformProvider(provider.Name, i.OS, i.Arch) + versions, err := i.registry.TerraformProviderVersions(req) + return versions, err +} + +func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { + urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) + if urls == nil { + return nil, fmt.Errorf("No download urls found for provider %s", name) + } + return urls, err +} + +// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. +// Prerelease versions are filtered. 
+func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { + // Loop through all the provider versions to find the earliest and latest + // versions that match the installer protocol to then select the closest of the two + var latest, earliest *response.TerraformProviderVersion + for _, version := range versions { + // Prereleases are filtered and will not be suggested + v, err := VersionStr(version.Version).Parse() + if err != nil || v.IsPrerelease() { + continue + } + + if err := i.checkPluginProtocol(version); err == nil { + if earliest == nil { + // Found the first provider version with compatible protocol + earliest = version + } + // Update the latest protocol compatible version + latest = version + } + } + if earliest == nil { + // No compatible protocol was found for any version + return nil, ErrorNoVersionCompatible + } + + // Convert protocols to comparable types + protoString := strconv.Itoa(int(i.PluginProtocolVersion)) + protocolVersion, err := VersionStr(protoString).Parse() + if err != nil { + return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) + } + + earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse() + if err != nil { + return nil, err + } + + // Compare installer protocol version with the first protocol listed of the earliest match + // [A, B] where A is assumed the earliest compatible major version of the protocol pair + if protocolVersion.NewerThan(earliestVersionProtocol) { + // Provider protocols are too old, the closest version is the earliest compatible version + return earliest, nil + } + + // Provider protocols are too new, the closest version is the latest compatible version + return latest, nil +} + +func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error { + // TODO: should this be a different error? We should probably differentiate between + // no compatible versions and no protocol versions listed at all + if len(versionMeta.Protocols) == 0 { + return fmt.Errorf("no plugin protocol versions listed") + } + + protoString := strconv.Itoa(int(i.PluginProtocolVersion)) + protocolVersion, err := VersionStr(protoString).Parse() + if err != nil { + return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion) + } + protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse() + if err != nil { + // This should not fail if the preceding function succeeded. + return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String()) + } + + for _, p := range versionMeta.Protocols { + proPro, err := VersionStr(p).Parse() + if err != nil { + // invalid protocol reported by the registry. Move along. + log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version) + continue + } + // success! + if protocolConstraint.Allows(proPro) { + return nil + } + } + + return ErrorNoVersionCompatible +} + +// platformCompatibleVersions returns a list of provider versions that are +// compatible with the requested platform. 
+func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { + var v []*response.TerraformProviderVersion + for _, version := range versions { + if err := i.checkPlatformCompatibility(version); err == nil { + v = append(v, version) + } + } + return v +} + +func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { + if len(versionMeta.Platforms) == 0 { + return fmt.Errorf("no supported provider platforms listed") + } + for _, p := range versionMeta.Platforms { + if p.Arch == i.Arch && p.OS == i.OS { + return nil + } + } + return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) +} + +// take the list of available versions for a plugin, and filter out those that +// don't fit the constraints. +func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { + var allowed []*response.TerraformProviderVersion + + for _, v := range available.Versions { + version, err := VersionStr(v.Version).Parse() + if err != nil { + log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) + continue + } + if required.Allows(version) { + allowed = append(allowed, v) + } + } + return allowed +} + +func checksumForFile(sums []byte, name string) string { + for _, line := range strings.Split(string(sums), "\n") { + parts := strings.Fields(line) + if len(parts) > 1 && parts[1] == name { + return parts[0] + } + } + return "" +} + +func getFile(url string) ([]byte, error) { + resp, err := httpClient.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s", resp.Status) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return data, err + } + return data, nil +} + +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of terraform, +// but an older version of the provider is compatible. +const providerProtocolTooOld = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the earliest compatible version. Select it with +the following version constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. +` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of terraform, +// and the user could either upgrade terraform or choose an older version of the +// provider +const providerProtocolTooNew = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the latest compatible version. Select it with +the following constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. + +Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. 
+` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go new file mode 100644 index 000000000..1a1004264 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. +func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. 
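+	// Editor's note (not part of the upstream source): Newest, defined in
+	// meta_set.go, returns the entry with the highest valid semver version, so
+	// when several equally-versioned matches remain the winner is effectively
+	// arbitrary, as noted above.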
+ plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go new file mode 100644 index 000000000..4622ca054 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/hashicorp.go @@ -0,0 +1,34 @@ +package discovery + +// HashicorpPublicKey is the HashiCorp public key, also available at +// https://www.hashicorp.com/security +const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y +0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go new file mode 100644 index 000000000..bdcebcb9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta.go @@ -0,0 +1,41 @@ +package discovery + +import ( + "crypto/sha256" + "io" + "os" +) + +// PluginMeta is metadata about a plugin, useful for launching the plugin +// and for understanding which plugins are available. +type PluginMeta struct { + // Name is the name of the plugin, e.g. as inferred from the plugin + // binary's filename, or by explicit configuration. + Name string + + // Version is the semver version of the plugin, expressed as a string + // that might not be semver-valid. + Version VersionStr + + // Path is the absolute path of the executable that can be launched + // to provide the RPC server for this plugin. + Path string +} + +// SHA256 returns a SHA256 hash of the content of the referenced executable +// file, or an error if the file's contents cannot be read. 
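+//
+// (Editor's note, not part of the upstream source: the digest is computed by
+// streaming the file through crypto/sha256 with io.Copy, so large plugin
+// binaries are hashed without being read entirely into memory.)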
+func (m PluginMeta) SHA256() ([]byte, error) { + f, err := os.Open(m.Path) + if err != nil { + return nil, err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go new file mode 100644 index 000000000..3a992892d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/meta_set.go @@ -0,0 +1,195 @@ +package discovery + +// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. +// +// Methods on this type allow filtering of the set to produce subsets that +// meet more restrictive criteria. +type PluginMetaSet map[PluginMeta]struct{} + +// Add inserts the given PluginMeta into the receiving set. This is a no-op +// if the given meta is already present. +func (s PluginMetaSet) Add(p PluginMeta) { + s[p] = struct{}{} +} + +// Remove removes the given PluginMeta from the receiving set. This is a no-op +// if the given meta is not already present. +func (s PluginMetaSet) Remove(p PluginMeta) { + delete(s, p) +} + +// Has returns true if the given meta is in the receiving set, or false +// otherwise. +func (s PluginMetaSet) Has(p PluginMeta) bool { + _, ok := s[p] + return ok +} + +// Count returns the number of metas in the set +func (s PluginMetaSet) Count() int { + return len(s) +} + +// ValidateVersions returns two new PluginMetaSets, separating those with +// versions that have syntax-valid semver versions from those that don't. +// +// Eliminating invalid versions from consideration (and possibly warning about +// them) is usually the first step of working with a meta set after discovery +// has completed. +func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { + valid = make(PluginMetaSet) + invalid = make(PluginMetaSet) + for p := range s { + if _, err := p.Version.Parse(); err == nil { + valid.Add(p) + } else { + invalid.Add(p) + } + } + return +} + +// WithName returns the subset of metas that have the given name. +func (s PluginMetaSet) WithName(name string) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + if p.Name == name { + ns.Add(p) + } + } + return ns +} + +// WithVersion returns the subset of metas that have the given version. +// +// This should be used only with the "valid" result from ValidateVersions; +// it will ignore any plugin metas that have invalid version strings. +func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + gotVersion, err := p.Version.Parse() + if err != nil { + continue + } + if gotVersion.Equal(version) { + ns.Add(p) + } + } + return ns +} + +// ByName groups the metas in the set by their Names, returning a map. +func (s PluginMetaSet) ByName() map[string]PluginMetaSet { + ret := make(map[string]PluginMetaSet) + for p := range s { + if _, ok := ret[p.Name]; !ok { + ret[p.Name] = make(PluginMetaSet) + } + ret[p.Name].Add(p) + } + return ret +} + +// Newest returns the one item from the set that has the newest Version value. +// +// The result is meaningful only if the set is already filtered such that +// all of the metas have the same Name. +// +// If there isn't at least one meta in the set then this function will panic. +// Use Count() to ensure that there is at least one value before calling. 
+//
+// If any of the metas have invalid version strings then this function will
+// panic. Use ValidateVersions() first to filter out metas with invalid
+// versions.
+//
+// If two metas have the same Version then one is arbitrarily chosen. This
+// situation should be avoided by pre-filtering the set.
+func (s PluginMetaSet) Newest() PluginMeta {
+	if len(s) == 0 {
+		panic("can't call Newest on empty PluginMetaSet")
+	}
+
+	var first = true
+	var winner PluginMeta
+	var winnerVersion Version
+	for p := range s {
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+
+		if first == true || version.NewerThan(winnerVersion) {
+			winner = p
+			winnerVersion = version
+			first = false
+		}
+	}
+
+	return winner
+}
+
+// ConstrainVersions takes a set of requirements and attempts to
+// return a map from name to a set of metas that have the matching
+// name and an appropriate version.
+//
+// If any of the given requirements match *no* plugins then its PluginMetaSet
+// in the returned map will be empty.
+//
+// All viable metas are returned, so the caller can apply any desired filtering
+// to reduce down to a single option. For example, calling Newest() to obtain
+// the highest available version.
+//
+// If any of the metas in the set have invalid version strings then this
+// function will panic. Use ValidateVersions() first to filter out metas with
+// invalid versions.
+func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
+	ret := make(map[string]PluginMetaSet)
+	for p := range s {
+		name := p.Name
+		allowedVersions, ok := reqd[name]
+		if !ok {
+			continue
+		}
+		if _, ok := ret[p.Name]; !ok {
+			ret[p.Name] = make(PluginMetaSet)
+		}
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+		if allowedVersions.Allows(version) {
+			ret[p.Name].Add(p)
+		}
+	}
+	return ret
+}
+
+// OverridePaths returns a new set where any existing plugins with the given
+// names are removed and replaced with the single path given in the map.
+//
+// This is here only to continue to support the legacy way of overriding
+// plugin binaries in the .terraformrc file. It treats all given plugins
+// as pre-versioning (version 0.0.0). This mechanism will eventually be
+// phased out, with vendor directories being the intended replacement.
+func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
+	ret := make(PluginMetaSet)
+	for p := range s {
+		if _, ok := paths[p.Name]; ok {
+			// Skip plugins that we're overriding
+			continue
+		}
+
+		ret.Add(p)
+	}
+
+	// Now add the metadata for overriding plugins
+	for name, path := range paths {
+		ret.Add(PluginMeta{
+			Name:    name,
+			Version: VersionZero,
+			Path:    path,
+		})
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
new file mode 100644
index 000000000..75430fdd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/requirements.go
@@ -0,0 +1,105 @@
+package discovery
+
+import (
+	"bytes"
+)
+
+// PluginRequirements describes a set of plugins (assumed to be of a consistent
+// kind) that are required to exist and have versions within the given
+// corresponding sets.
+type PluginRequirements map[string]*PluginConstraints
+
+// PluginConstraints represents an element of PluginRequirements describing
+// the constraints for a single plugin.
+type PluginConstraints struct { + // Specifies that the plugin's version must be within the given + // constraints. + Versions Constraints + + // If non-nil, the hash of the on-disk plugin executable must exactly + // match the SHA256 hash given here. + SHA256 []byte +} + +// Allows returns true if the given version is within the receiver's version +// constraints. +func (s *PluginConstraints) Allows(v Version) bool { + return s.Versions.Allows(v) +} + +// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable, +// either because it matches the constraint or because there is no such +// constraint. +func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool { + if s.SHA256 == nil { + return true + } + return bytes.Equal(s.SHA256, digest) +} + +// Merge takes the contents of the receiver and the other given requirements +// object and merges them together into a single requirements structure +// that satisfies both sets of requirements. +// +// Note that it doesn't make sense to merge two PluginRequirements with +// differing required plugin SHA256 hashes, since the result will never +// match any plugin. +func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements { + ret := make(PluginRequirements) + for n, c := range r { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + for n, c := range other { + if existing, exists := ret[n]; exists { + ret[n].Versions = ret[n].Versions.Append(c.Versions) + + if existing.SHA256 != nil { + if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) { + // If we've been asked to merge two constraints with + // different SHA256 hashes then we'll produce a dummy value + // that can never match anything. This is a silly edge case + // that no reasonable caller should hit. + ret[n].SHA256 = []byte(invalidProviderHash) + } + } else { + ret[n].SHA256 = c.SHA256 // might still be nil + } + } else { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + } + return ret +} + +// LockExecutables applies additional constraints to the receiver that +// require plugin executables with specific SHA256 digests. This modifies +// the receiver in-place, since it's intended to be applied after +// version constraints have been resolved. +// +// The given map must include a key for every plugin that is already +// required. If not, any missing keys will cause the corresponding plugin +// to never match, though the direct caller doesn't necessarily need to +// guarantee this as long as the downstream code _applying_ these constraints +// is able to deal with the non-match in some way. +func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { + for name, cons := range r { + digest := sha256s[name] + + if digest == nil { + // Prevent any match, which will then presumably cause the + // downstream consumer of this requirements to report an error. 
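+			// Editor's note (not part of the upstream source): invalidProviderHash
+			// is the empty string (see the const at the end of this file), so the
+			// resulting SHA256 value can never equal a real 32-byte digest and the
+			// plugin can therefore never be accepted.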
+ cons.SHA256 = []byte(invalidProviderHash) + continue + } + + cons.SHA256 = digest + } +} + +const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go new file mode 100644 index 000000000..7bbae50c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/signature.go @@ -0,0 +1,19 @@ +package discovery + +import ( + "bytes" + "strings" + + "golang.org/x/crypto/openpgp" +) + +// Verify the data using the provided openpgp detached signature and the +// embedded hashicorp public key. +func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) + if err != nil { + return nil, err + } + + return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go new file mode 100644 index 000000000..4311d5107 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version.go @@ -0,0 +1,77 @@ +package discovery + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" +) + +const VersionZero = "0.0.0" + +// A VersionStr is a string containing a possibly-invalid representation +// of a semver version number. Call Parse on it to obtain a real Version +// object, or discover that it is invalid. +type VersionStr string + +// Parse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s VersionStr) Parse() (Version, error) { + raw, err := version.NewVersion(string(s)) + if err != nil { + return Version{}, err + } + return Version{raw}, nil +} + +// MustParse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then it panics. +func (s VersionStr) MustParse() Version { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Version represents a version number that has been parsed from +// a semver string and known to be valid. +type Version struct { + // We wrap this here just because it avoids a proliferation of + // direct go-version imports all over the place, and keeps the + // version-processing details within this package. + raw *version.Version +} + +func (v Version) String() string { + return v.raw.String() +} + +func (v Version) NewerThan(other Version) bool { + return v.raw.GreaterThan(other.raw) +} + +func (v Version) Equal(other Version) bool { + return v.raw.Equal(other.raw) +} + +// IsPrerelease determines if version is a prerelease +func (v Version) IsPrerelease() bool { + return v.raw.Prerelease() != "" +} + +// MinorUpgradeConstraintStr returns a ConstraintStr that would permit +// minor upgrades relative to the receiving version. +func (v Version) MinorUpgradeConstraintStr() ConstraintStr { + segments := v.raw.Segments() + return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1])) +} + +type Versions []Version + +// Sort sorts version from newest to oldest. 
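+//
+// (Editor's note, not part of the upstream source: Sort reorders the receiver
+// in place using sort.Slice and NewerThan, so callers that need the original
+// order should copy the slice first.)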
+func (v Versions) Sort() { + sort.Slice(v, func(i, j int) bool { + return v[i].NewerThan(v[j]) + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go new file mode 100644 index 000000000..fc8b6f8bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery/version_set.go @@ -0,0 +1,83 @@ +package discovery + +import ( + "sort" + + version "github.com/hashicorp/go-version" +) + +// A ConstraintStr is a string containing a possibly-invalid representation +// of a version constraint provided in configuration. Call Parse on it to +// obtain a real Constraint object, or discover that it is invalid. +type ConstraintStr string + +// Parse transforms a ConstraintStr into a Constraints if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s ConstraintStr) Parse() (Constraints, error) { + raw, err := version.NewConstraint(string(s)) + if err != nil { + return Constraints{}, err + } + return Constraints{raw}, nil +} + +// MustParse is like Parse but it panics if the constraint string is invalid. +func (s ConstraintStr) MustParse() Constraints { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Constraints represents a set of versions which any given Version is either +// a member of or not. +type Constraints struct { + raw version.Constraints +} + +// NewConstraints creates a Constraints based on a version.Constraints. +func NewConstraints(c version.Constraints) Constraints { + return Constraints{c} +} + +// AllVersions is a Constraints containing all versions +var AllVersions Constraints + +func init() { + AllVersions = Constraints{ + raw: make(version.Constraints, 0), + } +} + +// Allows returns true if the given version permitted by the receiving +// constraints set. +func (s Constraints) Allows(v Version) bool { + return s.raw.Check(v.raw) +} + +// Append combines the receiving set with the given other set to produce +// a set that is the intersection of both sets, which is to say that resulting +// constraints contain only the versions that are members of both. +func (s Constraints) Append(other Constraints) Constraints { + raw := make(version.Constraints, 0, len(s.raw)+len(other.raw)) + + // Since "raw" is a list of constraints that remove versions from the set, + // "Intersection" is implemented by concatenating together those lists, + // thus leaving behind only the versions not removed by either list. + raw = append(raw, s.raw...) + raw = append(raw, other.raw...) + + // while the set is unordered, we sort these lexically for consistent output + sort.Slice(raw, func(i, j int) bool { + return raw[i].String() < raw[j].String() + }) + + return Constraints{raw} +} + +// String returns a string representation of the set members as a set +// of range constraints. 
+func (s Constraints) String() string { + return s.raw.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go new file mode 100644 index 000000000..0f48f2447 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/addressed_types.go @@ -0,0 +1,47 @@ +package providers + +import ( + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// AddressedTypes is a helper that extracts all of the distinct provider +// types from the given list of relative provider configuration addresses. +func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} + +// AddressedTypesAbs is a helper that extracts all of the distinct provider +// types from the given list of absolute provider configuration addresses. +func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.ProviderConfig.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go new file mode 100644 index 000000000..39aa1de60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/doc.go @@ -0,0 +1,3 @@ +// Package providers contains the interface and primary types required to +// implement a Terraform resource provider. +package providers diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go new file mode 100644 index 000000000..3d0aa8ec9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/provider.go @@ -0,0 +1,359 @@ +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetSchema returns the complete schema for the provider. + GetSchema() GetSchemaResponse + + // PrepareProviderConfig allows the provider to validate the configuration + // values, and set or override any values with defaults. + PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse + + // ValidateResourceTypeConfig allows the provider to validate the resource + // configuration values. + ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse + + // ValidateDataSource allows the provider to validate the data source + // configuration values. 
+ ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initialized the provider. + Configure(ConfigureRequest) ConfigureResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provider after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state. + PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. + ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provider is the schema for the provider itself. + Provider Schema + + // ResourceTypes map the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +type Schema struct { + Version int64 + Block *configschema.Block +} + +type PrepareProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type PrepareProviderConfigResponse struct { + // PreparedConfig is the configuration as prepared by the provider. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceTypeConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateResourceTypeConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ValidateDataSourceConfigRequest struct {
+	// TypeName is the name of the data source type to validate.
+	TypeName string
+
+	// Config is the configuration value to validate, which may contain unknown
+	// values.
+	Config cty.Value
+}
+
+type ValidateDataSourceConfigResponse struct {
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type UpgradeResourceStateRequest struct {
+	// TypeName is the name of the resource type being upgraded.
+	TypeName string
+
+	// Version is the version of the schema that created the current state.
+	Version int64
+
+	// RawStateJSON and RawStateFlatmap contain the state that needs to be
+	// upgraded to match the current schema version. Because the schema is
+	// unknown, this contains only the raw data as stored in the state.
+	// RawStateJSON is the current JSON state encoding.
+	// RawStateFlatmap is the legacy flatmap encoding.
+	// Only one of these fields may be set for the upgrade request.
+	RawStateJSON    []byte
+	RawStateFlatmap map[string]string
+}
+
+type UpgradeResourceStateResponse struct {
+	// UpgradedState is the newly upgraded resource state.
+	UpgradedState cty.Value
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ConfigureRequest struct {
+	// TerraformVersion is the version string from the running instance of
+	// terraform. Providers can use TerraformVersion to verify compatibility,
+	// and to store for informational purposes.
+	TerraformVersion string
+
+	// Config is the complete configuration value for the provider.
+	Config cty.Value
+}
+
+type ConfigureResponse struct {
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+}
+
+type ReadResourceRequest struct {
+	// TypeName is the name of the resource type being read.
+	TypeName string
+
+	// PriorState contains the previously saved state value for this resource.
+	PriorState cty.Value
+
+	// Private is an opaque blob that will be stored in state along with the
+	// resource. It is intended only for interpretation by the provider itself.
+	Private []byte
+}
+
+type ReadResourceResponse struct {
+	// NewState contains the current state of the resource.
+	NewState cty.Value
+
+	// Diagnostics contains any warnings or errors from the method call.
+	Diagnostics tfdiags.Diagnostics
+
+	// Private is an opaque blob that will be stored in state along with the
+	// resource. It is intended only for interpretation by the provider itself.
+	Private []byte
+}
+
+type PlanResourceChangeRequest struct {
+	// TypeName is the name of the resource type to plan.
+	TypeName string
+
+	// PriorState is the previously saved state value for this resource.
+	PriorState cty.Value
+
+	// ProposedNewState is the expected state after the new configuration is
+	// applied. This is created by directly applying the configuration to the
+	// PriorState. The provider is then responsible for applying any further
+	// changes required to create the proposed final state.
+	ProposedNewState cty.Value
+
+	// Config is the resource configuration, before being merged with the
+	// PriorState. Any value not explicitly set in the configuration will be
+	// null. Config is supplied for reference, but Provider implementations
+	// should prefer the ProposedNewState in most circumstances.
+ Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. + PriorPrivate []byte +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of thee attributes that are requiring + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by terraform + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of resource. + PriorState cty.Value + + // Planned state is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes. + PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. + Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. +func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. + TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go new file mode 100644 index 000000000..b42e49202 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/providers/resolver.go @@ -0,0 +1,68 @@ +package providers + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// Resolver is an interface implemented by objects that are able to resolve +// a given set of resource provider version constraints into Factory +// callbacks. +type Resolver interface { + // Given a constraint map, return a Factory for each requested provider. + // If some or all of the constraints cannot be satisfied, return a non-nil + // slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) +} + +// ResolverFunc wraps a callback function and turns it into a Resolver +// implementation, for convenience in situations where a function and its +// associated closure are sufficient as a resolver implementation. +type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) + +// ResolveProviders implements Resolver by calling the +// wrapped function. +func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + return f(reqd) +} + +// ResolverFixed returns a Resolver that has a fixed set of provider factories +// provided by the caller. 
The returned resolver ignores version constraints +// entirely and just returns the given factory for each requested provider +// name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResolverFixed(factories map[string]Factory) Resolver { + return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + ret := make(map[string]Factory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. +func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go new file mode 100644 index 000000000..b03ba9a1b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/doc.go @@ -0,0 +1,3 @@ +// Package provisioners contains the interface and primary types to implement a +// Terraform resource provisioner. +package provisioners diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go new file mode 100644 index 000000000..7a9dca0a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/factory.go @@ -0,0 +1,5 @@ +package provisioners + +// Factory is a function type that creates a new instance of a resource +// provisioner, or returns an error if that is impossible. +type Factory func() (Interface, error) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go new file mode 100644 index 000000000..7d8f4076b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/provisioners/provisioner.go @@ -0,0 +1,82 @@ +package provisioners + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. + GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. + // ProvisionResource blocks until the execution is complete. 
+ // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. + ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. + UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go new file mode 100644 index 000000000..4ef22052c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go @@ -0,0 +1,346 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/httpclient" + internalhttpclient "github.com/hashicorp/terraform-plugin-sdk/internal/httpclient" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" + "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/disco" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + requestTimeout = 10 * time.Second + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" +) + +var tfVersion = version.String() + +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. + client *http.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. 
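[editor's note] To show how a caller drives the provisioner Interface end to end, here is an illustrative sketch: the request/response types are exactly the ones defined above, while the runProvisioner wrapper and the logOutput type are hypothetical. Validation is expected to run before provisioning, and any progress messages arrive through the UIOutput callback.

package example

import (
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/internal/provisioners"
	"github.com/zclconf/go-cty/cty"
)

// logOutput satisfies provisioners.UIOutput by forwarding provisioner output
// to the standard logger.
type logOutput struct{}

func (logOutput) Output(s string) { log.Printf("[INFO] provisioner: %s", s) }

// runProvisioner validates the configuration and then provisions, returning
// true only when neither step produced error diagnostics.
func runProvisioner(p provisioners.Interface, config, conn cty.Value) bool {
	defer p.Close()

	vResp := p.ValidateProvisionerConfig(provisioners.ValidateProvisionerConfigRequest{Config: config})
	if vResp.Diagnostics.HasErrors() {
		return false
	}

	resp := p.ProvisionResource(provisioners.ProvisionResourceRequest{
		Config:     config,
		Connection: conn,
		UIOutput:   logOutput{},
	})
	return !resp.Diagnostics.HasErrors()
}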
+ services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = internalhttpclient.New() + client.Timeout = requestTimeout + } + + services.Transport = client.Transport + + services.SetUserAgent(httpclient.TerraformUserAgent(version.String())) + + return &Client{ + client: client, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, &ServiceUnreachableError{err} + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// ModuleVersions queries the registry for a module, and returns the available versions. +func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(module.Module(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching module versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errModuleNotFound{addr: module} + default: + return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) + } + + var versions response.ModuleVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + for _, mod := range versions.Modules { + for _, v := range mod.Versions { + log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) + } + } + + return &versions, nil +} + +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + creds, err := c.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// ModuleLocation find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. 
+func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return "", err + } + + var p *url.URL + if version == "" { + p, err = url.Parse(path.Join(module.Module(), "download")) + } else { + p, err = url.Parse(path.Join(module.Module(), version, "download")) + } + if err != nil { + return "", err + } + download := service.ResolveReference(p) + + log.Printf("[DEBUG] looking up module location from %q", download) + + req, err := http.NewRequest("GET", download.String(), nil) + if err != nil { + return "", err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // there should be no body, but save it for logging + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading response body from registry: %s", err) + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return "", fmt.Errorf("module %q version %q not found", module, version) + default: + // anything else is an error: + return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) + } + + // the download location is in the X-Terraform-Get header + location := resp.Header.Get(xTerraformGet) + if location == "" { + return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) + } + + // If location looks like it's trying to be a relative URL, treat it as + // one. + // + // We don't do this for just _any_ location, since the X-Terraform-Get + // header is a go-getter location rather than a URL, and so not all + // possible values will parse reasonably as URLs.) + // + // When used in conjunction with go-getter we normally require this header + // to be an absolute URL, but we are more liberal here because third-party + // registry implementations may not "know" their own absolute URLs if + // e.g. they are running behind a reverse proxy frontend, or such. + if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { + locationURL, err := url.Parse(location) + if err != nil { + return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) + } + locationURL = download.ResolveReference(locationURL) + location = locationURL.String() + } + + return location, nil +} + +// TerraformProviderVersions queries the registry for a provider, and returns the available versions. 
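[editor's note] Putting Discover, ModuleVersions and ModuleLocation together, a module installer resolves a registry source string to a go-getter location roughly as in the sketch below. The resolveModule helper is hypothetical, and a real caller would also apply version-constraint matching over the ModuleVersions response before downloading.

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/internal/registry"
	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
)

// resolveModule parses a registry module source and asks the registry for
// the download location of one specific version. Passing nil/nil to
// NewClient uses the default service discovery and HTTP client.
func resolveModule(source, version string) (string, error) {
	mod, err := regsrc.ParseModuleSource(source)
	if err != nil {
		return "", err
	}

	client := registry.NewClient(nil, nil)

	// ModuleVersions lists every published version; a real installer would
	// pick the newest one satisfying its constraints before downloading.
	if _, err := client.ModuleVersions(mod); err != nil {
		return "", err
	}

	return client.ModuleLocation(mod, version)
}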
+func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errProviderNotFound{addr: provider} + default: + return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status) + } + + var versions response.TerraformProviderVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + return &versions, nil +} + +// TerraformProviderLocation queries the registry for a provider download metadata +func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join( + provider.TerraformProvider(), + version, + "download", + provider.OS, + provider.Arch, + )) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider location from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var loc response.TerraformProviderPlatformLocation + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&loc); err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version) + default: + // anything else is an error: + return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status) + } + + return &loc, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go new file mode 100644 index 000000000..b05438c4d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/errors.go @@ -0,0 +1,55 @@ +package registry + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" + "github.com/hashicorp/terraform-svchost/disco" +) + +type errModuleNotFound struct { + addr *regsrc.Module +} + +func (e *errModuleNotFound) Error() string { + return fmt.Sprintf("module %s not found", e.addr) +} + +// IsModuleNotFound returns true only if the given error is a "module not found" +// error. 
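[editor's note] The provider-side calls follow the same shape. As a sketch (the providerDownload helper is hypothetical), fetching download metadata for the current platform from the public registry looks like this:

package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/internal/registry"
	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/response"
)

// providerDownload asks the public registry where to download the given
// provider version for the current GOOS/GOARCH (NewTerraformProvider fills
// those in when os and arch are left empty).
func providerDownload(name, version string) (*response.TerraformProviderPlatformLocation, error) {
	p := regsrc.NewTerraformProvider(name, "", "")
	client := registry.NewClient(nil, nil)
	return client.TerraformProviderLocation(p, version)
}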
This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsModuleNotFound(err error) bool { + _, ok := err.(*errModuleNotFound) + return ok +} + +type errProviderNotFound struct { + addr *regsrc.TerraformProvider +} + +func (e *errProviderNotFound) Error() string { + return fmt.Sprintf("provider %s not found", e.addr) +} + +// IsServiceNotProvided returns true only if the given error is a "service not provided" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsServiceNotProvided(err error) bool { + _, ok := err.(*disco.ErrServiceNotProvided) + return ok +} + +// ServiceUnreachableError Registry service is unreachable +type ServiceUnreachableError struct { + err error +} + +func (e *ServiceUnreachableError) Error() string { + return e.err.Error() +} + +// IsServiceUnreachable returns true if the registry/discovery service was unreachable +func IsServiceUnreachable(err error) bool { + _, ok := err.(*ServiceUnreachableError) + return ok +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go new file mode 100644 index 000000000..c9bc40bee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go @@ -0,0 +1,140 @@ +package regsrc + +import ( + "regexp" + "strings" + + "github.com/hashicorp/terraform-svchost" +) + +var ( + // InvalidHostString is a placeholder returned when a raw host can't be + // converted by IDNA spec. It will never be returned for any host for which + // Valid() is true. + InvalidHostString = "" + + // urlLabelEndSubRe is a sub-expression that matches any character that's + // allowed at the start or end of a URL label according to RFC1123. + urlLabelEndSubRe = "[0-9A-Za-z]" + + // urlLabelEndSubRe is a sub-expression that matches any character that's + // allowed at in a non-start or end of a URL label according to RFC1123. + urlLabelMidSubRe = "[0-9A-Za-z-]" + + // urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char + // in an IDN (Unicode) display URL. It's not strict - there are only ~15k + // valid Unicode points in IDN RFC (some with conditions). We are just going + // with being liberal with matching and then erroring if we fail to convert + // to punycode later (which validates chars fully). This at least ensures + // ascii chars dissalowed by the RC1123 parts above don't become legal + // again. + urlLabelUnicodeSubRe = "[^[:ascii:]]" + + // hostLabelSubRe is the sub-expression that matches a valid hostname label. + // It does not anchor the start or end so it can be composed into more + // complex RegExps below. Note that for sanity we don't handle disallowing + // raw punycode in this regexp (esp. since re2 doesn't support negative + // lookbehind, but we can capture it's presence here to check later). + hostLabelSubRe = "" + + // Match valid initial char, or unicode char + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + // Optionally, match 0 to 61 valid URL or Unicode chars, + // followed by one valid end char or unicode char + "(?:" + + "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" + + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + ")?" 
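[editor's note] Callers are expected to branch on these predicate functions rather than on concrete error types. A minimal sketch (the classify helper is hypothetical):

package example

import "github.com/hashicorp/terraform-plugin-sdk/internal/registry"

// classify maps a registry error onto a short, user-facing category.
func classify(err error) string {
	switch {
	case registry.IsModuleNotFound(err):
		return "module not found in registry"
	case registry.IsServiceNotProvided(err):
		return "host does not offer the registry service"
	case registry.IsServiceUnreachable(err):
		return "registry service unreachable"
	default:
		return "unexpected registry error"
	}
}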
+ + // hostSubRe is the sub-expression that matches a valid host prefix. + // Allows custom port. + hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?" + + // hostRe is a regexp that matches a valid host prefix. Additional + // validation of unicode strings is needed for matches. + hostRe = regexp.MustCompile("^" + hostSubRe + "$") +) + +// FriendlyHost describes a registry instance identified in source strings by a +// simple bare hostname like registry.terraform.io. +type FriendlyHost struct { + Raw string +} + +func NewFriendlyHost(host string) *FriendlyHost { + return &FriendlyHost{Raw: host} +} + +// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the +// given string. If no valid prefix is found, host will be nil and rest will +// contain the full source string. The host prefix must terminate at the end of +// the input or at the first / character. If one or more characters exist after +// the first /, they will be returned as rest (without the / delimiter). +// Hostnames containing punycode WILL be parsed successfully since they may have +// come from an internal normalized source string, however should be considered +// invalid if the string came from a user directly. This must be checked +// explicitly for user-input strings by calling Valid() on the +// returned host. +func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) { + parts := strings.SplitN(source, "/", 2) + + if hostRe.MatchString(parts[0]) { + host = &FriendlyHost{Raw: parts[0]} + if len(parts) == 2 { + rest = parts[1] + } + return + } + + // No match, return whole string as rest along with nil host + rest = source + return +} + +// Valid returns whether the host prefix is considered valid in any case. +// Example of invalid prefixes might include ones that don't conform to the host +// name specifications. Not that IDN prefixes containing punycode are not valid +// input which we expect to always be in user-input or normalised display form. +func (h *FriendlyHost) Valid() bool { + return svchost.IsValid(h.Raw) +} + +// Display returns the host formatted for display to the user in CLI or web +// output. +func (h *FriendlyHost) Display() string { + return svchost.ForDisplay(h.Raw) +} + +// Normalized returns the host formatted for internal reference or comparison. +func (h *FriendlyHost) Normalized() string { + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return InvalidHostString + } + return string(host) +} + +// String returns the host formatted as the user originally typed it assuming it +// was parsed from user input. +func (h *FriendlyHost) String() string { + return h.Raw +} + +// Equal compares the FriendlyHost against another instance taking normalization +// into account. Invalid hosts cannot be compared and will always return false. 
+func (h *FriendlyHost) Equal(other *FriendlyHost) bool { + if other == nil { + return false + } + + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false + } + + return otherHost == host +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go new file mode 100644 index 000000000..eb37481ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/module.go @@ -0,0 +1,175 @@ +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + svchost "github.com/hashicorp/terraform-svchost" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. (GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. + providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in terraform. + disallowed = map[string]bool{ + "github.com": true, + "bitbucket.org": true, + } +) + +// Module describes a Terraform Registry Module source. +type Module struct { + // RawHost is the friendly host prefix if one was present. It might be nil + // if the original source had no host prefix which implies + // PublicRegistryHost but is distinct from having an actual pointer to + // PublicRegistryHost since it encodes the fact the original string didn't + // include a host prefix at all which is significant for recovering actual + // input not just normalized form. Most callers should access it with Host() + // which will return public registry host instance if it's nil. + RawHost *FriendlyHost + RawNamespace string + RawName string + RawProvider string + RawSubmodule string +} + +// ParseModuleSource attempts to parse source as a Terraform registry module +// source. If the string is not found to be in a valid format, +// ErrInvalidModuleSource is returned. Note that this can only be used on +// "input" strings, e.g. either ones supplied by the user or potentially +// normalised but in Display form (unicode). It will fail to parse a source with +// a punycoded domain since this is not permitted input from a user. 
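[editor's note] As a sketch of the FriendlyHost flow above (the splitHost helper is hypothetical), parsing a user-supplied source separates an optional host prefix and lets the caller validate and normalize it before any comparison:

package example

import "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"

// splitHost returns the normalized (punycode) form of the host prefix of a
// source string, if a valid one is present, plus the remainder of the source.
func splitHost(source string) (string, string) {
	host, rest := regsrc.ParseFriendlyHost(source)
	if host == nil || !host.Valid() {
		return "", rest
	}
	return host.Normalized(), rest
}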
If you have +// an already normalized string internally, you can compare it without parsing +// by comparing with the normalized version of the subject with the normal +// string equality operator. +func ParseModuleSource(source string) (*Module, error) { + // See if there is a friendly host prefix. + host, rest := ParseFriendlyHost(source) + if host != nil { + if !host.Valid() || disallowed[host.Display()] { + return nil, ErrInvalidModuleSource + } + } + + matches := moduleSourceRe.FindStringSubmatch(rest) + if len(matches) < 4 { + return nil, ErrInvalidModuleSource + } + + m := &Module{ + RawHost: host, + RawNamespace: matches[1], + RawName: matches[2], + RawProvider: matches[3], + } + + if len(matches) == 5 { + m.RawSubmodule = matches[4] + } + + return m, nil +} + +// Display returns the source formatted for display to the user in CLI or web +// output. +func (m *Module) Display() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) +} + +// Normalized returns the source formatted for internal reference or comparison. +func (m *Module) Normalized() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) +} + +// String returns the source formatted as the user originally typed it assuming +// it was parsed from user input. +func (m *Module) String() string { + // Don't normalize public registry hostname - leave it exactly like the user + // input it. + hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had not host component this will return the +// PublicRegistryHost. +func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. 
+func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go new file mode 100644 index 000000000..c430bf141 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/regsrc.go @@ -0,0 +1,8 @@ +// Package regsrc provides helpers for working with source strings that identify +// resources within a Terraform registry. +package regsrc + +var ( + // PublicRegistryHost is a FriendlyHost that represents the public registry. + PublicRegistryHost = NewFriendlyHost("registry.terraform.io") +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go new file mode 100644 index 000000000..7205d03b8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go @@ -0,0 +1,60 @@ +package regsrc + +import ( + "fmt" + "runtime" + "strings" + + "github.com/hashicorp/terraform-svchost" +) + +var ( + // DefaultProviderNamespace represents the namespace for canonical + // HashiCorp-controlled providers. + DefaultProviderNamespace = "-" +) + +// TerraformProvider describes a Terraform Registry Provider source. +type TerraformProvider struct { + RawHost *FriendlyHost + RawNamespace string + RawName string + OS string + Arch string +} + +// NewTerraformProvider constructs a new provider source. +func NewTerraformProvider(name, os, arch string) *TerraformProvider { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + + // separate namespace if included + namespace := DefaultProviderNamespace + if names := strings.SplitN(name, "/", 2); len(names) == 2 { + namespace, name = names[0], names[1] + } + p := &TerraformProvider{ + RawHost: PublicRegistryHost, + RawNamespace: namespace, + RawName: name, + OS: os, + Arch: arch, + } + + return p +} + +// Provider returns just the registry ID of the provider +func (p *TerraformProvider) TerraformProvider() string { + return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) +} + +// SvcHost returns the svchost.Hostname for this provider. The +// default PublicRegistryHost is returned. +func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { + return svchost.ForComparison(PublicRegistryHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go new file mode 100644 index 000000000..06163963e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module.go @@ -0,0 +1,46 @@ +package response + +// ModuleSubmodule is the metadata about a specific submodule within +// a module. This includes the root module as a special case. +type ModuleSubmodule struct { + Path string `json:"path"` + Readme string `json:"readme"` + Empty bool `json:"empty"` + + Inputs []*ModuleInput `json:"inputs"` + Outputs []*ModuleOutput `json:"outputs"` + Dependencies []*ModuleDep `json:"dependencies"` + Resources []*ModuleResource `json:"resources"` +} + +// ModuleInput is an input for a module. 
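[editor's note] Tying the Module helpers together, the hypothetical describeModule sketch below parses a source and reads it back through the accessors, so the public-registry default is applied when no host prefix was given:

package example

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc"
)

// describeModule parses a registry module source and reports the host it
// resolves to plus the registry ID, using the accessors rather than the
// Raw* fields so the PublicRegistryHost default takes effect.
func describeModule(source string) (string, error) {
	mod, err := regsrc.ParseModuleSource(source)
	if err != nil {
		return "", err
	}
	host, err := mod.SvcHost()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("host=%s module=%s submodule=%q", host, mod.Module(), mod.RawSubmodule), nil
}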
+type ModuleInput struct { + Name string `json:"name"` + Description string `json:"description"` + Default string `json:"default"` +} + +// ModuleOutput is an output for a module. +type ModuleOutput struct { + Name string `json:"name"` + Description string `json:"description"` +} + +// ModuleDep is an output for a module. +type ModuleDep struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version"` +} + +// ModuleProviderDep is the output for a provider dependency +type ModuleProviderDep struct { + Name string `json:"name"` + Version string `json:"version"` +} + +// ModuleResource is an output for a module. +type ModuleResource struct { + Name string `json:"name"` + Type string `json:"type"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go new file mode 100644 index 000000000..f69e9750c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/module_versions.go @@ -0,0 +1,32 @@ +package response + +// ModuleVersions is the response format that contains all metadata about module +// versions needed for terraform CLI to resolve version constraints. See RFC +// TF-042 for details on this format. +type ModuleVersions struct { + Modules []*ModuleProviderVersions `json:"modules"` +} + +// ModuleProviderVersions is the response format for a single module instance, +// containing metadata about all versions and their dependencies. +type ModuleProviderVersions struct { + Source string `json:"source"` + Versions []*ModuleVersion `json:"versions"` +} + +// ModuleVersion is the output metadata for a given version needed by CLI to +// resolve candidate versions to satisfy requirements. +type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. +type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go new file mode 100644 index 000000000..75a925490 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/pagination.go @@ -0,0 +1,65 @@ +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. 
+type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). + if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go new file mode 100644 index 000000000..c2c333b0d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/response/terraform_provider.go @@ -0,0 +1,95 @@ +package response + +import ( + "sort" + "strings" + + version "github.com/hashicorp/go-version" +) + +// TerraformProvider is the response structure for all required information for +// Terraform to choose a download URL. It must include all versions and all +// platforms for Terraform to perform version and os/arch constraint matching +// locally. +type TerraformProvider struct { + ID string `json:"id"` + + Versions []*TerraformProviderVersion `json:"versions"` +} + +// TerraformProviderVersion is the Terraform-specific response structure for a +// provider version. +type TerraformProviderVersion struct { + Version string `json:"version"` + Protocols []string `json:"protocols"` + + Platforms []*TerraformProviderPlatform `json:"platforms"` +} + +// TerraformProviderVersions is the Terraform-specific response structure for an +// array of provider versions +type TerraformProviderVersions struct { + ID string `json:"id"` + Versions []*TerraformProviderVersion `json:"versions"` + Warnings []string `json:"warnings"` +} + +// TerraformProviderPlatform is the Terraform-specific response structure for a +// provider platform. +type TerraformProviderPlatform struct { + OS string `json:"os"` + Arch string `json:"arch"` +} + +// TerraformProviderPlatformLocation is the Terraform-specific response +// structure for a provider platform with all details required to perform a +// download. 
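[editor's note] To make the offset arithmetic above concrete (a sketch only; the wrapper function is hypothetical): a second page of a ten-per-page listing yields a next offset of 20, a previous offset clamped to 0, and URLs rewritten via setQueryParam, with the offset parameter elided when it equals the default of 0.

package example

import "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response"

// secondPageMeta builds pagination metadata for offset 10 with limit 10 and
// more results available: NextOffset points at 20, PrevOffset at 0, and
// PrevURL omits the offset query parameter because 0 is the default value.
func secondPageMeta() response.PaginationMeta {
	return response.NewPaginationMeta(10, 10, true, "https://registry.example.com/v1/modules?limit=10")
}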
+type TerraformProviderPlatformLocation struct { + Protocols []string `json:"protocols"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + DownloadURL string `json:"download_url"` + ShasumsURL string `json:"shasums_url"` + ShasumsSignatureURL string `json:"shasums_signature_url"` + Shasum string `json:"shasum"` + + SigningKeys SigningKeyList `json:"signing_keys"` +} + +// SigningKeyList is the response structure for a list of signing keys. +type SigningKeyList struct { + GPGKeys []*GPGKey `json:"gpg_public_keys"` +} + +// GPGKey is the response structure for a GPG key. +type GPGKey struct { + ASCIIArmor string `json:"ascii_armor"` + Source string `json:"source"` + SourceURL *string `json:"source_url"` +} + +// Collection type for TerraformProviderVersion +type ProviderVersionCollection []*TerraformProviderVersion + +// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg +// keys in the response. +func (signingKeys *SigningKeyList) GPGASCIIArmor() string { + keys := []string{} + + for _, gpgKey := range signingKeys.GPGKeys { + keys = append(keys, gpgKey.ASCIIArmor) + } + + return strings.Join(keys, "\n") +} + +// Sort sorts versions from newest to oldest. +func (v ProviderVersionCollection) Sort() { + sort.Slice(v, func(i, j int) bool { + versionA, _ := version.NewVersion(v[i].Version) + versionB, _ := version.NewVersion(v[j].Version) + + return versionA.GreaterThan(versionB) + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go new file mode 100644 index 000000000..7dd74ac78 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/doc.go @@ -0,0 +1,3 @@ +// Package states contains the types that are used to represent Terraform +// states. +package states diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go new file mode 100644 index 000000000..0dc73499a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/eachmode_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type EachMode"; DO NOT EDIT. + +package states + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoEach-0] + _ = x[EachList-76] + _ = x[EachMap-77] +} + +const ( + _EachMode_name_0 = "NoEach" + _EachMode_name_1 = "EachListEachMap" +) + +var ( + _EachMode_index_1 = [...]uint8{0, 8, 15} +) + +func (i EachMode) String() string { + switch { + case i == 0: + return _EachMode_name_0 + case 76 <= i && i <= 77: + i -= 76 + return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] + default: + return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go new file mode 100644 index 000000000..891adc003 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_generation.go @@ -0,0 +1,20 @@ +package states + +// Generation is used to represent multiple objects in a succession of objects +// represented by a single resource instance address. 
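[editor's note] ProviderVersionCollection.Sort and SigningKeyList.GPGASCIIArmor are small conveniences over these response types. A sketch of picking the newest version (the newestVersion helper is hypothetical):

package example

import "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response"

// newestVersion sorts the versions newest-first in place and returns the
// newest version string, or "" when the slice is empty.
func newestVersion(versions []*response.TerraformProviderVersion) string {
	coll := response.ProviderVersionCollection(versions)
	coll.Sort()
	if len(coll) == 0 {
		return ""
	}
	return coll[0].Version
}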
A resource instance can +// have multiple generations over its lifetime due to object replacement +// (when a change can't be applied without destroying and re-creating), and +// multiple generations can exist at the same time when create_before_destroy +// is used. +// +// A Generation value can either be the value of the variable "CurrentGen" or +// a value of type DeposedKey. Generation values can be compared for equality +// using "==" and used as map keys. The zero value of Generation (nil) is not +// a valid generation and must not be used. +type Generation interface { + generation() +} + +// CurrentGen is the Generation representing the currently-active object for +// a resource instance. +var CurrentGen Generation diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go new file mode 100644 index 000000000..3bb717d33 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object.go @@ -0,0 +1,120 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// ResourceInstanceObject is the local representation of a specific remote +// object associated with a resource instance. In practice not all remote +// objects are actually remote in the sense of being accessed over the network, +// but this is the most common case. +// +// It is not valid to mutate a ResourceInstanceObject once it has been created. +// Instead, create a new object and replace the existing one. +type ResourceInstanceObject struct { + // Value is the object-typed value representing the remote object within + // Terraform. + Value cty.Value + + // Private is an opaque value set by the provider when this object was + // last created or updated. Terraform Core does not use this value in + // any way and it is not exposed anywhere in the user interface, so + // a provider can use it for retaining any necessary private state. + Private []byte + + // Status represents the "readiness" of the object as of the last time + // it was updated. + Status ObjectStatus + + // Dependencies is a set of other addresses in the same module which + // this instance depended on when the given attributes were evaluated. + // This is used to construct the dependency relationships for an object + // whose configuration is no longer available, such as if it has been + // removed from configuration altogether, or is now deposed. + Dependencies []addrs.Referenceable +} + +// ObjectStatus represents the status of a RemoteObject. +type ObjectStatus rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type ObjectStatus + +const ( + // ObjectReady is an object status for an object that is ready to use. + ObjectReady ObjectStatus = 'R' + + // ObjectTainted is an object status representing an object that is in + // an unrecoverable bad state due to a partial failure during a create, + // update, or delete operation. Since it cannot be moved into the + // ObjectRead state, a tainted object must be replaced. + ObjectTainted ObjectStatus = 'T' + + // ObjectPlanned is a special object status used only for the transient + // placeholder objects we place into state during the refresh and plan + // walks to stand in for objects that will be created during apply. 
+ // + // Any object of this status must have a corresponding change recorded + // in the current plan, whose value must then be used in preference to + // the value stored in state when evaluating expressions. A planned + // object stored in state will be incomplete if any of its attributes are + // not yet known, and the plan must be consulted in order to "see" those + // unknown values, because the state is not able to represent them. + ObjectPlanned ObjectStatus = 'P' +) + +// Encode marshals the value within the receiver to produce a +// ResourceInstanceObjectSrc ready to be written to a state file. +// +// The given type must be the implied type of the resource type schema, and +// the given value must conform to it. It is important to pass the schema +// type and not the object's own type so that dynamically-typed attributes +// will be stored correctly. The caller must also provide the version number +// of the schema that the given type was derived from, which will be recorded +// in the source object so it can be used to detect when schema migration is +// required on read. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once once this +// method is called. +func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + // Our state serialization can't represent unknown values, so we convert + // them to nulls here. This is lossy, but nobody should be writing unknown + // values here and expecting to get them out again later. + // + // We get unknown values here while we're building out a "planned state" + // during the plan phase, but the value stored in the plan takes precedence + // for expression evaluation. The apply step should never produce unknown + // values, but if it does it's the responsibility of the caller to detect + // and raise an error about that. + val := cty.UnknownAsNull(o.Value) + + src, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + return &ResourceInstanceObjectSrc{ + SchemaVersion: schemaVersion, + AttrsJSON: src, + Private: o.Private, + Status: o.Status, + Dependencies: o.Dependencies, + }, nil +} + +// AsTainted returns a deep copy of the receiver with the status updated to +// ObjectTainted. +func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { + if o == nil { + // A nil object can't be tainted, but we'll allow this anyway to + // avoid a crash, since we presumably intend to eventually record + // the object has having been deleted anyway. + return nil + } + ret := o.DeepCopy() + ret.Status = ObjectTainted + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go new file mode 100644 index 000000000..728ad80d1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/instance_object_src.go @@ -0,0 +1,113 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" +) + +// ResourceInstanceObjectSrc is a not-fully-decoded version of +// ResourceInstanceObject. 
Decoding of it can be completed by first handling +// any schema migration steps to get to the latest schema version and then +// calling method Decode with the implied type of the latest schema. +type ResourceInstanceObjectSrc struct { + // SchemaVersion is the resource-type-specific schema version number that + // was current when either AttrsJSON or AttrsFlat was encoded. Migration + // steps are required if this is less than the current version number + // reported by the corresponding provider. + SchemaVersion uint64 + + // AttrsJSON is a JSON-encoded representation of the object attributes, + // encoding the value (of the object type implied by the associated resource + // type schema) that represents this remote object in Terraform Language + // expressions, and is compared with configuration when producing a diff. + // + // This is retained in JSON format here because it may require preprocessing + // before decoding if, for example, the stored attributes are for an older + // schema version which the provider must upgrade before use. If the + // version is current, it is valid to simply decode this using the + // type implied by the current schema, without the need for the provider + // to perform an upgrade first. + // + // When writing a ResourceInstanceObject into the state, AttrsJSON should + // always be conformant to the current schema version and the current + // schema version should be recorded in the SchemaVersion field. + AttrsJSON []byte + + // AttrsFlat is a legacy form of attributes used in older state file + // formats, and in the new state format for objects that haven't yet been + // upgraded. This attribute is mutually exclusive with Attrs: for any + // ResourceInstanceObject, only one of these attributes may be populated + // and the other must be nil. + // + // An instance object with this field populated should be upgraded to use + // Attrs at the earliest opportunity, since this legacy flatmap-based + // format will be phased out over time. AttrsFlat should not be used when + // writing new or updated objects to state; instead, callers must follow + // the recommendations in the AttrsJSON documentation above. + AttrsFlat map[string]string + + // These fields all correspond to the fields of the same name on + // ResourceInstanceObject. + Private []byte + Status ObjectStatus + Dependencies []addrs.Referenceable +} + +// Decode unmarshals the raw representation of the object attributes. Pass the +// implied type of the corresponding resource type schema for correct operation. +// +// Before calling Decode, the caller must check that the SchemaVersion field +// exactly equals the version number of the schema whose implied type is being +// passed, or else the result is undefined. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once once this +// method is called. +func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) { + var val cty.Value + var err error + if os.AttrsFlat != nil { + // Legacy mode. We'll do our best to unpick this from the flatmap. 
+ val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) + if err != nil { + return nil, err + } + } else { + val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) + if err != nil { + return nil, err + } + } + + return &ResourceInstanceObject{ + Value: val, + Status: os.Status, + Dependencies: os.Dependencies, + Private: os.Private, + }, nil +} + +// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the +// metadata from the receiver and writing in the given new schema version +// and attribute value that are presumed to have resulted from upgrading +// from an older schema version. +func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + new := os.DeepCopy() + new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap + + // This is the same principle as ResourceInstanceObject.Encode, but + // avoiding a decode/re-encode cycle because we don't have type info + // available for the "old" attributes. + newAttrs = cty.UnknownAsNull(newAttrs) + src, err := ctyjson.Marshal(newAttrs, newType) + if err != nil { + return nil, err + } + + new.AttrsJSON = src + new.SchemaVersion = newSchemaVersion + return new, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go new file mode 100644 index 000000000..6b74cbfa6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/module.go @@ -0,0 +1,268 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Module is a container for the states of objects within a particular module. +type Module struct { + Addr addrs.ModuleInstance + + // Resources contains the state for each resource. The keys in this map are + // an implementation detail and must not be used by outside callers. + Resources map[string]*Resource + + // OutputValues contains the state for each output value. The keys in this + // map are output value names. + OutputValues map[string]*OutputValue + + // LocalValues contains the value for each named output value. The keys + // in this map are local value names. + LocalValues map[string]cty.Value +} + +// NewModule constructs an empty module state for the given module address. +func NewModule(addr addrs.ModuleInstance) *Module { + return &Module{ + Addr: addr, + Resources: map[string]*Resource{}, + OutputValues: map[string]*OutputValue{}, + LocalValues: map[string]cty.Value{}, + } +} + +// Resource returns the state for the resource with the given address within +// the receiving module state, or nil if the requested resource is not tracked +// in the state. +func (ms *Module) Resource(addr addrs.Resource) *Resource { + return ms.Resources[addr.String()] +} + +// ResourceInstance returns the state for the resource instance with the given +// address within the receiving module state, or nil if the requested instance +// is not tracked in the state. +func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { + rs := ms.Resource(addr.Resource) + if rs == nil { + return nil + } + return rs.Instance(addr.Key) +} + +// SetResourceMeta updates the resource-level metadata for the resource +// with the given address, creating the resource state for it if it doesn't +// already exist. 
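[editor's note] Encode and Decode are intended to be symmetric around the schema's implied type. A minimal sketch of that round trip (the roundTrip helper is hypothetical, and ty must be the implied type of the current resource type schema):

package example

import (
	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/internal/states"
)

// roundTrip encodes an in-memory instance object the way it would be written
// to a state file and immediately decodes it back with the same implied type.
// Unknown attribute values are converted to null by Encode, so the result is
// only guaranteed to match for fully-known objects.
func roundTrip(obj *states.ResourceInstanceObject, ty cty.Type, schemaVersion uint64) (*states.ResourceInstanceObject, error) {
	src, err := obj.Encode(ty, schemaVersion)
	if err != nil {
		return nil, err
	}
	return src.Decode(ty)
}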
+func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) { + rs := ms.Resource(addr) + if rs == nil { + rs = &Resource{ + Addr: addr, + Instances: map[addrs.InstanceKey]*ResourceInstance{}, + } + ms.Resources[addr.String()] = rs + } + + rs.EachMode = eachMode + rs.ProviderConfig = provider +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed. +func (ms *Module) RemoveResource(addr addrs.Resource) { + delete(ms.Resources, addr.String()) +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simulataneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance will be removed altogether. +// +// The provider address and "each mode" are resource-wide settings and so they +// are updated for all other instances of the same resource as a side-effect of +// this call. +func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + + is.Current = obj + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + if obj != nil { + is.Deposed[key] = obj + } else { + delete(is.Deposed, key) + } + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. 
+ delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + is := rs.Instance(addr.Key) + if is == nil { + return + } + delete(is.Deposed, key) + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// deposeResourceInstanceObject is the real implementation of +// SyncState.DeposeResourceInstanceObject. +func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { + is := ms.ResourceInstance(addr) + if is == nil { + return NotDeposed + } + return is.deposeCurrentObject(forceKey) +} + +// maybeRestoreResourceInstanceDeposed is the real implementation of +// SyncState.MaybeRestoreResourceInstanceDeposed. +func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { + rs := ms.Resource(addr.Resource) + if rs == nil { + return false + } + is := rs.Instance(addr.Key) + if is == nil { + return false + } + if is.Current != nil { + return false + } + if len(is.Deposed) == 0 { + return false + } + is.Current = is.Deposed[key] + delete(is.Deposed, key) + return true +} + +// SetOutputValue writes an output value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { + os := &OutputValue{ + Value: value, + Sensitive: sensitive, + } + ms.OutputValues[name] = os + return os +} + +// RemoveOutputValue removes the output value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveOutputValue(name string) { + delete(ms.OutputValues, name) +} + +// SetLocalValue writes a local value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetLocalValue(name string, value cty.Value) { + ms.LocalValues[name] = value +} + +// RemoveLocalValue removes the local value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveLocalValue(name string) { + delete(ms.LocalValues, name) +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// You probably shouldn't call this! See the method of the same name on +// type State for more information on what this is for and the rare situations +// where it is safe to use. 
+func (ms *Module) PruneResourceHusks() { + for _, rs := range ms.Resources { + if len(rs.Instances) == 0 { + ms.RemoveResource(rs.Addr) + } + } +} + +// empty returns true if the receving module state is contributing nothing +// to the state. In other words, it returns true if the module could be +// removed from the state altogether without changing the meaning of the state. +// +// In practice a module containing no objects is the same as a non-existent +// module, and so we can opportunistically clean up once a module becomes +// empty on the assumption that it will be re-added if needed later. +func (ms *Module) empty() bool { + if ms == nil { + return true + } + + // This must be updated to cover any new collections added to Module + // in future. + return (len(ms.Resources) == 0 && + len(ms.OutputValues) == 0 && + len(ms.LocalValues) == 0) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go new file mode 100644 index 000000000..96a6db2f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/objectstatus_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. + +package states + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ObjectReady-82] + _ = x[ObjectTainted-84] + _ = x[ObjectPlanned-80] +} + +const ( + _ObjectStatus_name_0 = "ObjectPlanned" + _ObjectStatus_name_1 = "ObjectReady" + _ObjectStatus_name_2 = "ObjectTainted" +) + +func (i ObjectStatus) String() string { + switch { + case i == 80: + return _ObjectStatus_name_0 + case i == 82: + return _ObjectStatus_name_1 + case i == 84: + return _ObjectStatus_name_2 + default: + return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go new file mode 100644 index 000000000..d232b76d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/output_value.go @@ -0,0 +1,14 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Value cty.Value + Sensitive bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go new file mode 100644 index 000000000..32ea638ac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/resource.go @@ -0,0 +1,233 @@ +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the module-relative address for the resource this state object + // belongs to. + Addr addrs.Resource + + // EachMode is the multi-instance mode currently in use for this resource, + // or NoEach if this is a single-instance resource. 
This dictates what + // type of value is returned when accessing this resource via expressions + // in the Terraform language. + EachMode EachMode + + // Instances contains the potentially-multiple instances associated with + // this resource. This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. + Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. + ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. +func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { + return rs.Instances[key] +} + +// EnsureInstance returns the state for the instance with the given key, +// creating a new empty state for it if one doesn't already exist. +// +// Because this may create and save a new state, it is considered to be +// a write operation. +func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { + ret := rs.Instance(key) + if ret == nil { + ret = NewResourceInstance() + rs.Instances[key] = ret + } + return ret +} + +// ResourceInstance represents the state of a particular instance of a resource. +type ResourceInstance struct { + // Current, if non-nil, is the remote object that is currently represented + // by the corresponding resource instance. + Current *ResourceInstanceObjectSrc + + // Deposed, if len > 0, contains any remote objects that were previously + // represented by the corresponding resource instance but have been + // replaced and are pending destruction due to the create_before_destroy + // lifecycle mode. + Deposed map[DeposedKey]*ResourceInstanceObjectSrc +} + +// NewResourceInstance constructs and returns a new ResourceInstance, ready to +// use. +func NewResourceInstance() *ResourceInstance { + return &ResourceInstance{ + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + } +} + +// HasCurrent returns true if this resource instance has a "current"-generation +// object. Most instances do, but this can briefly be false during a +// create-before-destroy replace operation when the current has been deposed +// but its replacement has not yet been created. +func (i *ResourceInstance) HasCurrent() bool { + return i != nil && i.Current != nil +} + +// HasDeposed returns true if this resource instance has a deposed object +// with the given key. +func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { + return i != nil && i.Deposed[key] != nil +} + +// HasObjects returns true if this resource has any objects at all, whether +// current or deposed. +func (i *ResourceInstance) HasObjects() bool { + return i.Current != nil || len(i.Deposed) != 0 +} + +// deposeCurrentObject is part of the real implementation of +// SyncState.DeposeResourceInstanceObject. The exported method uses a lock +// to ensure that we can safely allocate an unused deposed key without +// collision. 
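+//
+// Illustrative sketch (editorial, not upstream documentation): for an
+// instance is that currently has a Current object,
+//
+//	key := is.deposeCurrentObject(NotDeposed) // allocates an unused random key
+//	// is.Current is now nil; is.Deposed[key] holds the former current object
+//
+// Passing a forceKey other than NotDeposed uses that key instead, and panics
+// if it is already in use.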
+func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { + if !i.HasCurrent() { + return NotDeposed + } + + key := forceKey + if key == NotDeposed { + key = i.findUnusedDeposedKey() + } else { + if _, exists := i.Deposed[key]; exists { + panic(fmt.Sprintf("forced key %s is already in use", forceKey)) + } + } + i.Deposed[key] = i.Current + i.Current = nil + return key +} + +// GetGeneration retrieves the object of the given generation from the +// ResourceInstance, or returns nil if there is no such object. +// +// If the given generation is nil or invalid, this method will panic. +func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { + if gen == CurrentGen { + return i.Current + } + if dk, ok := gen.(DeposedKey); ok { + return i.Deposed[dk] + } + if gen == nil { + panic(fmt.Sprintf("get with nil Generation")) + } + // Should never fall out here, since the above covers all possible + // Generation values. + panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. To avoid this risk, instead use the +// DeposeResourceInstanceObject method on the SyncState wrapper type, which +// allocates a key and uses it atomically. +func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { + return i.findUnusedDeposedKey() +} + +// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance. +func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { + for { + key := NewDeposedKey() + if _, exists := i.Deposed[key]; !exists { + return key + } + // Spin until we find a unique one. This shouldn't take long, because + // we have a 32-bit keyspace and there's rarely more than one deposed + // instance. + } +} + +// EachMode specifies the multi-instance mode for a resource. +type EachMode rune + +const ( + NoEach EachMode = 0 + EachList EachMode = 'L' + EachMap EachMode = 'M' +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type EachMode + +func eachModeForInstanceKey(key addrs.InstanceKey) EachMode { + switch key.(type) { + case addrs.IntKey: + return EachList + case addrs.StringKey: + return EachMap + default: + if key == addrs.NoKey { + return NoEach + } + panic(fmt.Sprintf("don't know an each mode for instance key %#v", key)) + } +} + +// DeposedKey is an 8-character hex string used to uniquely identify deposed +// instance objects in the state. +type DeposedKey string + +// NotDeposed is a special invalid value of DeposedKey that is used to represent +// the absence of a deposed key. It must not be used as an actual deposed key. +const NotDeposed = DeposedKey("") + +var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) + +// NewDeposedKey generates a pseudo-random deposed key. Because of the short +// length of these keys, uniqueness is not a natural consequence and so the +// caller should test to see if the generated key is already in use and generate +// another if so, until a unique key is found.
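+//
+// A minimal sketch of that collision check (editorial illustration; "inUse"
+// is a hypothetical set of keys already present in the state):
+//
+//	key := NewDeposedKey()
+//	for {
+//		if _, exists := inUse[key]; !exists {
+//			break
+//		}
+//		key = NewDeposedKey()
+//	}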
+func NewDeposedKey() DeposedKey { + v := deposedKeyRand.Uint32() + return DeposedKey(fmt.Sprintf("%08x", v)) +} + +func (k DeposedKey) String() string { + return string(k) +} + +func (k DeposedKey) GoString() string { + ks := string(k) + switch { + case ks == "": + return "states.NotDeposed" + default: + return fmt.Sprintf("states.DeposedKey(%s)", ks) + } +} + +// Generation is a helper method to convert a DeposedKey into a Generation. +// If the receiver is anything other than NotDeposed then the result is +// just the same value as a Generation. If the receiver is NotDeposed then +// the result is CurrentGen. +func (k DeposedKey) Generation() Generation { + if k == NotDeposed { + return CurrentGen + } + return k +} + +// generation is an implementation of Generation. +func (k DeposedKey) generation() {} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go new file mode 100644 index 000000000..328dd53d5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state.go @@ -0,0 +1,229 @@ +package states + +import ( + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// State is the top-level type of a Terraform state. +// +// A state should be mutated only via its accessor methods, to ensure that +// invariants are preserved. +// +// Access to State and the nested values within it is not concurrency-safe, +// so when accessing a State object concurrently it is the caller's +// responsibility to ensure that only one write is in progress at a time +// and that reads only occur when no write is in progress. The most common +// way to achieve this is to wrap the State in a SyncState and use the +// higher-level atomic operations supported by that type. +type State struct { + // Modules contains the state for each module. The keys in this map are + // an implementation detail and must not be used by outside callers. + Modules map[string]*Module +} + +// NewState constructs a minimal empty state, containing an empty root module. +func NewState() *State { + modules := map[string]*Module{} + modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) + return &State{ + Modules: modules, + } +} + +// BuildState is a helper -- primarily intended for tests -- to build a state +// using imperative code against the SyncState type while still acting as +// an expression of type *State to assign into a containing struct. +func BuildState(cb func(*SyncState)) *State { + s := NewState() + cb(s.SyncWrapper()) + return s +} + +// Empty returns true if there are no resources or populated output values +// in the receiver. In other words, if this state could be safely replaced +// with the return value of NewState and be functionally equivalent. +func (s *State) Empty() bool { + if s == nil { + return true + } + for _, ms := range s.Modules { + if len(ms.Resources) != 0 { + return false + } + if len(ms.OutputValues) != 0 { + return false + } + } + return true +} + +// Module returns the state for the module with the given address, or nil if +// the requested module is not tracked in the state. +func (s *State) Module(addr addrs.ModuleInstance) *Module { + if s == nil { + panic("State.Module on nil *State") + } + return s.Modules[addr.String()] } + +// RemoveModule removes the module with the given address from the state, +// unless it is the root module.
The root module cannot be deleted, and so +// this method will panic if that is attempted. +// +// Removing a module implicitly discards all of the resources, outputs and +// local values within it, and so this should usually be done only for empty +// modules. For callers accessing the state through a SyncState wrapper, modules +// are automatically pruned if they are empty after one of their contained +// elements is removed. +func (s *State) RemoveModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + panic("attempted to remove root module") + } + + delete(s.Modules, addr.String()) +} + +// RootModule is a convenient alias for Module(addrs.RootModuleInstance). +func (s *State) RootModule() *Module { + if s == nil { + panic("RootModule called on nil State") + } + return s.Modules[addrs.RootModuleInstance.String()] +} + +// EnsureModule returns the state for the module with the given address, +// creating and adding a new one if necessary. +// +// Since this might modify the state to add a new instance, it is considered +// to be a write operation. +func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { + ms := s.Module(addr) + if ms == nil { + ms = NewModule(addr) + s.Modules[addr.String()] = ms + } + return ms +} + +// HasResources returns true if there is at least one resource (of any mode) +// present in the receiving state. +func (s *State) HasResources() bool { + if s == nil { + return false + } + for _, ms := range s.Modules { + if len(ms.Resources) > 0 { + return true + } + } + return false +} + +// Resource returns the state for the resource with the given address, or nil +// if no such resource is tracked in the state. +func (s *State) Resource(addr addrs.AbsResource) *Resource { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.Resource(addr.Resource) +} + +// ResourceInstance returns the state for the resource instance with the given +// address, or nil if no such resource is tracked in the state. +func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + if s == nil { + panic("State.ResourceInstance on nil *State") + } + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.ResourceInstance(addr.Resource) +} + +// OutputValue returns the state for the output value with the given address, +// or nil if no such output value is tracked in the state. +func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.OutputValues[addr.OutputValue.Name] +} + +// LocalValue returns the value of the named local value with the given address, +// or cty.NilVal if no such value is tracked in the state. +func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { + ms := s.Module(addr.Module) + if ms == nil { + return cty.NilVal + } + return ms.LocalValues[addr.LocalValue.Name] +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving state. +// +// The result is de-duplicated so that each distinct address appears only once. +func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { + if s == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, ms := range s.Modules { + for _, rc := range ms.Resources { + m[rc.ProviderConfig.String()] = rc.ProviderConfig + } + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. 
+ keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// This should generally be used only after a "terraform destroy" operation, +// to finalize the cleanup of the state. It is not correct to use this after +// other operations because if a resource has "count = 0" or "for_each" over +// an empty collection then we want to retain it in the state so that references +// to it, particularly in "strange" contexts like "terraform console", can be +// properly resolved. +// +// This method MUST NOT be called concurrently with other readers and writers +// of the receiving state. +func (s *State) PruneResourceHusks() { + for _, m := range s.Modules { + m.PruneResourceHusks() + if len(m.Resources) == 0 && !m.Addr.IsRoot() { + s.RemoveModule(m.Addr) + } + } +} + +// SyncWrapper returns a SyncState object wrapping the receiver. +func (s *State) SyncWrapper() *SyncState { + return &SyncState{ + state: s, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go new file mode 100644 index 000000000..6266aca79 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_deepcopy.go @@ -0,0 +1,221 @@ +package states + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// Taking deep copies of states is an important operation because state is +// otherwise a mutable data structure that is challenging to share across +// many separate callers. It is important that the DeepCopy implementations +// in this file comprehensively copy all parts of the state data structure +// that could be mutated via pointers. + +// DeepCopy returns a new state that contains equivalent data to the receiver +// but shares no backing memory in common. +// +// As with all methods on State, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + modules := make(map[string]*Module, len(s.Modules)) + for k, m := range s.Modules { + modules[k] = m.DeepCopy() + } + return &State{ + Modules: modules, + } +} + +// DeepCopy returns a new module state that contains equivalent data to the +// receiver but shares no backing memory in common. +// +// As with all methods on Module, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns.
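+//
+// For example (editorial sketch, using a hypothetical local value name), a
+// caller that wants to experiment with a module state without disturbing the
+// original might do:
+//
+//	copied := ms.DeepCopy()
+//	copied.SetLocalValue("example", cty.StringVal("changed"))
+//	// ms.LocalValues is unaffected; only copied differs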
+func (ms *Module) DeepCopy() *Module { + if ms == nil { + return nil + } + + resources := make(map[string]*Resource, len(ms.Resources)) + for k, r := range ms.Resources { + resources[k] = r.DeepCopy() + } + outputValues := make(map[string]*OutputValue, len(ms.OutputValues)) + for k, v := range ms.OutputValues { + outputValues[k] = v.DeepCopy() + } + localValues := make(map[string]cty.Value, len(ms.LocalValues)) + for k, v := range ms.LocalValues { + // cty.Value is immutable, so we don't need to copy these. + localValues[k] = v + } + + return &Module{ + Addr: ms.Addr, // technically mutable, but immutable by convention + Resources: resources, + OutputValues: outputValues, + LocalValues: localValues, + } +} + +// DeepCopy returns a new resource state that contains equivalent data to the +// receiver but shares no backing memory in common. +// +// As with all methods on Resource, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (rs *Resource) DeepCopy() *Resource { + if rs == nil { + return nil + } + + instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances)) + for k, i := range rs.Instances { + instances[k] = i.DeepCopy() + } + + return &Resource{ + Addr: rs.Addr, + EachMode: rs.EachMode, + Instances: instances, + ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention + } +} + +// DeepCopy returns a new resource instance state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstance, this method is not safe to use +// concurrently with writing to any portion of the receiving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (is *ResourceInstance) DeepCopy() *ResourceInstance { + if is == nil { + return nil + } + + deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed)) + for k, obj := range is.Deposed { + deposed[k] = obj.DeepCopy() + } + + return &ResourceInstance{ + Current: is.Current.DeepCopy(), + Deposed: deposed, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObjectSrc, this method is not safe to +// use concurrently with writing to any portion of the receiving data structure. +// It is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns.
+func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { + if obj == nil { + return nil + } + + var attrsFlat map[string]string + if obj.AttrsFlat != nil { + attrsFlat = make(map[string]string, len(obj.AttrsFlat)) + for k, v := range obj.AttrsFlat { + attrsFlat[k] = v + } + } + + var attrsJSON []byte + if obj.AttrsJSON != nil { + attrsJSON = make([]byte, len(obj.AttrsJSON)) + copy(attrsJSON, obj.AttrsJSON) + } + + var private []byte + if obj.Private != nil { + private = make([]byte, len(obj.Private)) + copy(private, obj.Private) + } + + // Some addrs.Referenceable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here. + dependencies := make([]addrs.Referenceable, len(obj.Dependencies)) + copy(dependencies, obj.Dependencies) + + return &ResourceInstanceObjectSrc{ + Status: obj.Status, + SchemaVersion: obj.SchemaVersion, + Private: private, + AttrsFlat: attrsFlat, + AttrsJSON: attrsJSON, + Dependencies: dependencies, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObject, this method is not safe to use +// concurrently with writing to any portion of the receiving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { + if obj == nil { + return nil + } + + var private []byte + if obj.Private != nil { + private = make([]byte, len(obj.Private)) + copy(private, obj.Private) + } + + // Some addrs.Referenceable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here. + var dependencies []addrs.Referenceable + if obj.Dependencies != nil { + dependencies = make([]addrs.Referenceable, len(obj.Dependencies)) + copy(dependencies, obj.Dependencies) + } + + return &ResourceInstanceObject{ + Value: obj.Value, + Status: obj.Status, + Private: private, + Dependencies: dependencies, + } +} + +// DeepCopy returns a new output value state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on OutputValue, this method is not safe to use +// concurrently with writing to any portion of the receiving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (os *OutputValue) DeepCopy() *OutputValue { + if os == nil { + return nil + } + + return &OutputValue{ + Value: os.Value, + Sensitive: os.Sensitive, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go new file mode 100644 index 000000000..ea20967e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_equal.go @@ -0,0 +1,18 @@ +package states + +import ( + "reflect" +) + +// Equal returns true if the receiver is functionally equivalent to other, +// including any ephemeral portions of the state that would not be included +// if the state were saved to files.
+// +// To test only the persistent portions of two states for equality, instead +// use statefile.StatesMarshalEqual. +func (s *State) Equal(other *State) bool { + // For the moment this is sufficient, but we may need to do something + // more elaborate in future if we have any portions of state that require + // more sophisticated comparisons. + return reflect.DeepEqual(s, other) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go new file mode 100644 index 000000000..dffd650d6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/state_string.go @@ -0,0 +1,279 @@ +package states + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" +) + +// String returns a rather-odd string representation of the entire state. +// +// This is intended to match the behavior of the older terraform.State.String +// method that is used in lots of existing tests. It should not be used in +// new tests: instead, use "cmp" to directly compare the state data structures +// and print out a diff if they do not match. +// +// This method should never be used in non-test code, whether directly by call +// or indirectly via a %s or %q verb in package fmt. +func (s *State) String() string { + if s == nil { + return "" + } + + // sort the modules by name for consistent output + modules := make([]string, 0, len(s.Modules)) + for m := range s.Modules { + modules = append(modules, m) + } + sort.Strings(modules) + + var buf bytes.Buffer + for _, name := range modules { + m := s.Modules[name] + mStr := m.testString() + + // If we're the root module, we just write the output directly. + if m.Addr.IsRoot() { + buf.WriteString(mStr + "\n") + continue + } + + // We need to build out a string that resembles the not-quite-standard + // format that terraform.State.String used to use, where there's a + // "module." prefix but then just a chain of all of the module names + // without any further "module." portions. + buf.WriteString("module") + for _, step := range m.Addr { + buf.WriteByte('.') + buf.WriteString(step.Name) + if step.InstanceKey != addrs.NoKey { + buf.WriteByte('[') + buf.WriteString(step.InstanceKey.String()) + buf.WriteByte(']') + } + } + buf.WriteString(":\n") + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// testString is used to produce part of the output of State.String. It should +// never be used directly. +func (m *Module) testString() string { + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + // We use AbsResourceInstance here, even though everything belongs to + // the same module, just because we have a sorting behavior defined + // for those but not for just ResourceInstance. 
+ addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) + for _, rs := range m.Resources { + for ik := range rs.Instances { + addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance)) + } + } + + sort.Slice(addrsOrder, func(i, j int) bool { + return addrsOrder[i].Less(addrsOrder[j]) + }) + + for _, fakeAbsAddr := range addrsOrder { + addr := fakeAbsAddr.Resource + rs := m.Resource(addr.ContainingResource()) + is := m.ResourceInstance(addr) + + // Here we need to fake up a legacy-style address as the old state + // types would've used, since that's what our tests against those + // old types expect. The significant difference is that instancekey + // is dot-separated rather than using index brackets. + k := addr.ContainingResource().String() + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + k = fmt.Sprintf("%s.%d", k, tk) + default: + // No other key types existed for the legacy types, so we + // can do whatever we want here. We'll just use our standard + // syntax for these. + k = k + tk.String() + } + } + + id := LegacyInstanceObjectID(is.Current) + + taintStr := "" + if is.Current != nil && is.Current.Status == ObjectTainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(is.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) + + // Attributes were a flatmap before, but are not anymore. To preserve + // our old output as closely as possible we need to do a conversion + // to flatmap. Normally we'd want to do this with schema for + // accuracy, but for our purposes here it only needs to be approximate. + // This should produce an identical result for most cases, though + // in particular will differ in a few cases: + // - The keys used for elements in a set will be different + // - Values for attributes of type cty.DynamicPseudoType will be + // misinterpreted (but these weren't possible in old world anyway) + var attributes map[string]string + if obj := is.Current; obj != nil { + switch { + case obj.AttrsFlat != nil: + // Easy (but increasingly unlikely) case: the state hasn't + // actually been upgraded to the new form yet. + attributes = obj.AttrsFlat + case obj.AttrsJSON != nil: + ty, err := ctyjson.ImpliedType(obj.AttrsJSON) + if err == nil { + val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) + if err == nil { + attributes = hcl2shim.FlatmapValueFromHCL2(val) + } + } + } + } + attrKeys := make([]string, 0, len(attributes)) + for ak, val := range attributes { + if ak == "id" { + continue + } + + // don't show empty containers in the output + if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + // CAUTION: Since deposed keys are now random strings instead of + // incrementing integers, this result will not be deterministic + // if there is more than one deposed object. 
+ i := 1 + for _, t := range is.Deposed { + id := LegacyInstanceObjectID(t) + taintStr := "" + if t.Status == ObjectTainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) + i++ + } + + if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range obj.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) + } + } + } + + if len(m.OutputValues) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + v := m.OutputValues[k] + lv := hcl2shim.ConfigValueFromHCL2(v.Value) + switch vTyped := lv.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + default: + buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) + } + } + } + + return buf.String() +} + +// LegacyInstanceObjectID is a helper for extracting an object id value from +// an instance object in a way that approximates how we used to do this +// for the old state types. ID is no longer first-class, so this is preserved +// only for compatibility with old tests that include the id as part of their +// expected value. +func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { + if obj == nil { + return "" + } + + if obj.AttrsJSON != nil { + type WithID struct { + ID string `json:"id"` + } + var withID WithID + err := json.Unmarshal(obj.AttrsJSON, &withID) + if err == nil { + return withID.ID + } + } else if obj.AttrsFlat != nil { + if flatID, exists := obj.AttrsFlat["id"]; exists { + return flatID + } + } + + // For resource types created after we removed id as special there may + // not actually be one at all. This is okay because older tests won't + // encounter this, and new tests shouldn't be using ids. + return "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go new file mode 100644 index 000000000..042ce51c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/diagnostics.go @@ -0,0 +1,62 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +const invalidFormat = "Invalid state file format" + +// jsonUnmarshalDiags is a helper that translates errors returned from +// json.Unmarshal into hopefully-more-helpful diagnostics messages. +func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if err == nil { + return diags + } + + switch tErr := err.(type) { + case *json.SyntaxError: + // We've usually already successfully parsed a source file as JSON at + // least once before we'd use jsonUnmarshalDiags with it (to sniff + // the version number) so this particular error should not appear much + // in practice. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + // This is likely to be the most common area, describing a + // non-conformance between the file and the expected file format + // at a semantic level. + if tErr.Field != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), + )) + break + } else { + // Without a field name, we can't really say anything helpful. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + "The state file does not conform to the expected JSON data structure.", + )) + } + default: + // Fallback for all other types of errors. This can happen only for + // custom UnmarshalJSON implementations, so should be encountered + // only rarely. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), + )) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go new file mode 100644 index 000000000..625d0cf42 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/doc.go @@ -0,0 +1,3 @@ +// Package statefile deals with the file format used to serialize states for +// persistent storage and then deserialize them into memory again later. +package statefile diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go new file mode 100644 index 000000000..70c8ba6ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/file.go @@ -0,0 +1,31 @@ +package statefile + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// File is the in-memory representation of a state file. It includes the state +// itself along with various metadata used to track changing state files for +// the same configuration over time. +type File struct { + // TerraformVersion is the version of Terraform that wrote this state file. + TerraformVersion *version.Version + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial uint64 + + // Lineage is set when a new, blank state file is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string + + // State is the actual state represented by this file. 
+ State *states.State +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go new file mode 100644 index 000000000..f1899cd22 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/read.go @@ -0,0 +1,209 @@ +package statefile + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// ErrNoState is returned by Read when the state file is empty. +var ErrNoState = errors.New("no state") + +// Read reads a state from the given reader. +// +// Legacy state format versions 1 through 3 are supported, but the result will +// contain object attributes in the deprecated "flatmap" format and so must +// be upgraded by the caller before use. +// +// If the state file is empty, the special error value ErrNoState is returned. +// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics +// potentially describing multiple errors. +func Read(r io.Reader) (*File, error) { + // Some callers provide us a "typed nil" *os.File here, which would + // cause us to panic below if we tried to use it. + if f, ok := r.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + var diags tfdiags.Diagnostics + + // We actually just buffer the whole thing in memory, because states are + // generally not huge and we need to be able to sniff for a version + // number before full parsing. + src, err := ioutil.ReadAll(r) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read state file", + fmt.Sprintf("The state file could not be read: %s", err), + )) + return nil, diags.Err() + } + + if len(src) == 0 { + return nil, ErrNoState + } + + state, diags := readState(src) + if diags.HasErrors() { + return nil, diags.Err() + } + + if state == nil { + // Should never happen + panic("readState returned nil state with no errors") + } + + if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { + return state, fmt.Errorf( + "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", + state.TerraformVersion, + tfversion.SemVer, + state.TerraformVersion, + ) + } + + return state, diags.Err() +} + +func readState(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if looksLikeVersion0(src) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", + )) + return nil, diags + } + + version, versionDiags := sniffJSONStateVersion(src) + diags = diags.Append(versionDiags) + if versionDiags.HasErrors() { + return nil, diags + } + + switch version { + case 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file uses JSON syntax but has a version number of zero.
There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", + )) + return nil, diags + case 1: + return readStateV1(src) + case 2: + return readStateV2(src) + case 3: + return readStateV3(src) + case 4: + return readStateV4(src) + default: + thisVersion := tfversion.SemVer.String() + creatingVersion := sniffJSONStateTerraformVersion(src) + switch { + case creatingVersion != "": + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion), + )) + } + return nil, diags + } +} + +func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + type VersionSniff struct { + Version *uint64 `json:"version"` + } + var sniff VersionSniff + err := json.Unmarshal(src, &sniff) + if err != nil { + switch tErr := err.(type) { + case *json.SyntaxError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file could not be parsed as JSON.", + )) + } + } + + if sniff.Version == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file does not have a \"version\" attribute, which is required to identify the format version.", + )) + return 0, diags + } + + return *sniff.Version, diags +} + +// sniffJSONStateTerraformVersion attempts to sniff the Terraform version +// specification from the given state file source code. The result is either +// a version string or an empty string if no version number could be extracted. +// +// This is a best-effort function intended to produce nicer error messages. It +// should not be used for any real processing. +func sniffJSONStateTerraformVersion(src []byte) string { + type VersionSniff struct { + Version string `json:"terraform_version"` + } + var sniff VersionSniff + + err := json.Unmarshal(src, &sniff) + if err != nil { + return "" + } + + // Attempt to parse the string as a version so we won't report garbage + // as a version number. + _, err = version.NewVersion(sniff.Version) + if err != nil { + return "" + } + + return sniff.Version +} + +// unsupportedFormat is a diagnostic summary message for when the state file +// seems to not be a state file at all, or is not a supported version. +// +// Use invalidFormat instead for the subtly-different case of "this looks like +// it's intended to be a state file but it's not structured correctly". 
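+//
+// Illustrative use (editorial sketch, mirroring the calls in readState above):
+//
+//	diags = diags.Append(tfdiags.Sourceless(
+//		tfdiags.Error,
+//		unsupportedFormat,
+//		"The state file could not be parsed as JSON.",
+//	))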
+const unsupportedFormat = "Unsupported state file format" + +const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go new file mode 100644 index 000000000..9b533317b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version0.go @@ -0,0 +1,23 @@ +package statefile + +// looksLikeVersion0 sniffs for the signature indicating a version 0 state +// file. +// +// Version 0 was the number retroactively assigned to Terraform's initial +// (unversioned) binary state file format, which was later superseded by the +// version 1 format in JSON. +// +// Version 0 is no longer supported, so this is used only to detect it and +// return a nice error to the user. +func looksLikeVersion0(src []byte) bool { + // Version 0 files begin with the magic prefix "tfstate". + const magic = "tfstate" + if len(src) < len(magic) { + // Not even long enough to have the magic prefix + return false + } + if string(src[0:len(magic)]) == magic { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go new file mode 100644 index 000000000..85b422ad2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1.go @@ -0,0 +1,167 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV1 := &stateV1{} + err := json.Unmarshal(src, sV1) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV1(sV1) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2, err := upgradeStateV1ToV2(sV1) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV1 is a representation of the legacy JSON state format version 1. +// +// It is only used to read version 1 JSON files prior to upgrading them to +// the current format. +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. 
Module imports are + // always disjoint, so the path represents a module tree. + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: a module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instance on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that Terraform is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias.
+ // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go new file mode 100644 index 000000000..0b417e1c4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version1_upgrade.go @@ -0,0 +1,172 @@ +package statefile + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { + log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*moduleStateV2, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &stateV2{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &remoteStateV2{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root. 
+ path = []string{"root"} + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*outputStateV2) + for key, output := range old.Outputs { + outputs[key] = &outputStateV2{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*resourceStateV2) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &moduleStateV2{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*instanceStateV2, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &resourceStateV2{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &instanceStateV2{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Meta: newMeta, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go new file mode 100644 index 000000000..6d10166b2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2.go @@ -0,0 +1,204 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2 := &stateV2{} + err := json.Unmarshal(src, sV2) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3, err := upgradeStateV2ToV3(sV2) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = 
diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 2. +// +// It is only used to read version 2 JSON files prior to upgrading them to +// the current format. +type stateV2 struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV2 `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *backendStateV2 `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV2 `json:"modules"` +} + +type remoteStateV2 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type outputStateV2 struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` +} + +type moduleStateV2 struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*outputStateV2 `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV2 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. 
If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` +} + +type resourceStateV2 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV2 `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*instanceStateV2 `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` +} + +type instanceStateV2 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. 
+ Tainted bool `json:"tainted"` +} + +type backendStateV2 struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go new file mode 100644 index 000000000..2d03c07c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version2_upgrade.go @@ -0,0 +1,145 @@ +package statefile + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" +) + +func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { + if old == nil { + return (*stateV3)(nil), nil + } + + var new *stateV3 + { + copy, err := copystructure.Config{Lock: true}.Copy(old) + if err != nil { + panic(err) + } + newWrongType := copy.(*stateV2) + newRightType := (stateV3)(*newWrongType) + new = &newRightType + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. 
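As an aside to the heuristic described in the comment above, here is a minimal standalone sketch (not part of the vendored file; the attribute names are invented) of the count-key rewrite this pass performs on a map-typed attribute stored in the v2 flatmap style:

```go
package main

import "fmt"

func main() {
	// v2-style flatmap for a map attribute: the element count uses "#".
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}

	// Non-numeric sub-keys ("Name", "Env") identify the collection as a map,
	// so the v3 upgrade renames the count key from "tags.#" to "tags.%".
	if count, ok := attrs["tags.#"]; ok {
		attrs["tags.%"] = count
		delete(attrs, "tags.#")
	}

	fmt.Println(attrs) // map[tags.%:2 tags.Env:prod tags.Name:web]
}
```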
+ + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go new file mode 100644 index 000000000..1c81e7169 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3.go @@ -0,0 +1,50 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3 := &stateV3{} + err := json.Unmarshal(src, sV3) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4, err := upgradeStateV3ToV4(sV3) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 3. +// +// It is only used to read version 3 JSON files prior to upgrading them to +// the current format. +// +// The differences between version 2 and version 3 are only in the data and +// not in the structure, so stateV3 actually shares the same structs as +// stateV2. Type stateV3 represents that the data within is formatted as +// expected by the V3 format, rather than the V2 format. 
+type stateV3 stateV2 diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go new file mode 100644 index 000000000..f08a62b2d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version3_upgrade.go @@ -0,0 +1,444 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { + + if old.Serial < 0 { + // The new format is using uint64 here, which should be fine for any + // real state (we only used positive integers in practice) but we'll + // catch this explicitly here to avoid weird behavior if a state file + // has been tampered with in some way. + return nil, fmt.Errorf("state has serial less than zero, which is invalid") + } + + new := &stateV4{ + TerraformVersion: old.TFVersion, + Serial: uint64(old.Serial), + Lineage: old.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + if new.TerraformVersion == "" { + // Older formats considered this to be optional, but now it's required + // and so we'll stub it out with something that's definitely older + // than the version that really created this state. + new.TerraformVersion = "0.0.0" + } + + for _, msOld := range old.Modules { + if len(msOld.Path) < 1 || msOld.Path[0] != "root" { + return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) + } + + // Convert legacy-style module address into our newer address type. + // Since these old formats are only generated by versions of Terraform + // that don't support count and for_each on modules, we can just assume + // all of the modules are unkeyed. + moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) + for i, name := range msOld.Path[1:] { + moduleAddr[i] = addrs.ModuleInstanceStep{ + Name: name, + InstanceKey: addrs.NoKey, + } + } + + // In a v3 state file, a "resource state" is actually an instance + // state, so we need to fill in a missing level of heirarchy here + // by lazily creating resource states as we encounter them. + // We'll track them in here, keyed on the string representation of + // the resource address. + resourceStates := map[string]*resourceStateV4{} + + for legacyAddr, rsOld := range msOld.Resources { + instAddr, err := parseLegacyResourceAddress(legacyAddr) + if err != nil { + return nil, err + } + + resAddr := instAddr.Resource + rs, exists := resourceStates[resAddr.String()] + if !exists { + var modeStr string + switch resAddr.Mode { + case addrs.ManagedResourceMode: + modeStr = "managed" + case addrs.DataResourceMode: + modeStr = "data" + default: + return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode) + } + + // In state versions prior to 4 we allowed each instance of a + // resource to have its own provider configuration address, + // which makes no real sense in practice because providers + // are associated with resources in the configuration. 
We + // elevate that to the resource level during this upgrade, + // implicitly taking the provider address of the first instance + // we encounter for each resource. While this is lossy in + // theory, in practice there is no reason for these values to + // differ between instances. + var providerAddr addrs.AbsProviderConfig + oldProviderAddr := rsOld.Provider + if strings.Contains(oldProviderAddr, "provider.") { + // Smells like a new-style provider address, but we'll test it. + var diags tfdiags.Diagnostics + providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) + } + } else { + // Smells like an old-style module-local provider address, + // which we'll need to migrate. We'll assume it's referring + // to the same module the resource is in, which might be + // incorrect but it'll get fixed up next time any updates + // are made to an instance. + if oldProviderAddr != "" { + localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err()) + } + providerAddr = localAddr.Absolute(moduleAddr) + } else { + providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) + } + } + + rs = &resourceStateV4{ + Module: moduleAddr.String(), + Mode: modeStr, + Type: resAddr.Type, + Name: resAddr.Name, + Instances: []instanceObjectStateV4{}, + ProviderConfig: providerAddr.String(), + } + resourceStates[resAddr.String()] = rs + } + + // Now we'll deal with the instance itself, which may either be + // the first instance in a resource we just created or an additional + // instance for a resource added on a prior loop. + instKey := instAddr.Key + if isOld := rsOld.Primary; isOld != nil { + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) + if err != nil { + return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + for i, isOld := range rsOld.Deposed { + // When we migrate old instances we'll use sequential deposed + // keys just so that the upgrade result is deterministic. New + // deposed keys allocated moving forward will be pseudorandomly + // selected, but we check for collisions and so these + // non-random ones won't hurt. + deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) + if err != nil { + return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + + if instKey != addrs.NoKey && rs.EachMode == "" { + rs.EachMode = "list" + } + } + + for _, rs := range resourceStates { + new.Resources = append(new.Resources, *rs) + } + + if len(msOld.Path) == 1 && msOld.Path[0] == "root" { + // We'll migrate the outputs for this module too, then. + for name, oldOS := range msOld.Outputs { + newOS := outputStateV4{ + Sensitive: oldOS.Sensitive, + } + + valRaw := oldOS.Value + valSrc, err := json.Marshal(valRaw) + if err != nil { + // Should never happen, because this value came from JSON + // in the first place and so we're just round-tripping here. 
+ return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) + } + + // The "type" field in state V2 wasn't really that useful + // since it was only able to capture string vs. list vs. map. + // For this reason, during upgrade we'll just discard it + // altogether and use cty's idea of the implied type of + // turning our old value into JSON. + ty, err := ctyjson.ImpliedType(valSrc) + if err != nil { + // REALLY should never happen, because we literally just + // encoded this as JSON above! + return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) + } + + // ImpliedType tends to produce structural types, but since older + // version of Terraform didn't support those a collection type + // is probably what was intended, so we'll see if we can + // interpret our value as one. + ty = simplifyImpliedValueType(ty) + + tySrc, err := ctyjson.MarshalType(ty) + if err != nil { + return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) + } + + newOS.ValueRaw = json.RawMessage(valSrc) + newOS.ValueTypeRaw = json.RawMessage(tySrc) + + new.RootOutputs[name] = newOS + } + } + } + + new.normalize() + + return new, nil +} + +func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { + + // Schema versions were, in prior formats, a private concern of the provider + // SDK, and not a first-class concept in the state format. Here we're + // sniffing for the pre-0.12 SDK's way of representing schema versions + // and promoting it to our first-class field if we find it. We'll ignore + // it if it doesn't look like what the SDK would've written. If this + // sniffing fails then we'll assume schema version 0. + var schemaVersion uint64 + migratedSchemaVersion := false + if raw, exists := isOld.Meta["schema_version"]; exists { + switch tv := raw.(type) { + case string: + v, err := strconv.ParseUint(tv, 10, 64) + if err == nil { + schemaVersion = v + migratedSchemaVersion = true + } + case int: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + case float64: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + } + } + + private := map[string]interface{}{} + for k, v := range isOld.Meta { + if k == "schema_version" && migratedSchemaVersion { + // We're gonna promote this into our first-class schema version field + continue + } + private[k] = v + } + var privateJSON []byte + if len(private) != 0 { + var err error + privateJSON, err = json.Marshal(private) + if err != nil { + // This shouldn't happen, because the Meta values all came from JSON + // originally anyway. 
+ return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) + } + } + + var status string + if isOld.Tainted { + status = "tainted" + } + + var instKeyRaw interface{} + switch tk := instKey.(type) { + case addrs.IntKey: + instKeyRaw = int(tk) + case addrs.StringKey: + instKeyRaw = string(tk) + default: + if instKeyRaw != nil { + return nil, fmt.Errorf("unsupported instance key: %#v", instKey) + } + } + + var attributes map[string]string + if isOld.Attributes != nil { + attributes = make(map[string]string, len(isOld.Attributes)) + for k, v := range isOld.Attributes { + attributes[k] = v + } + } + if isOld.ID != "" { + // As a special case, if we don't already have an "id" attribute and + // yet there's a non-empty first-class ID on the old object then we'll + // create a synthetic id attribute to avoid losing that first-class id. + // In practice this generally arises only in tests where state literals + // are hand-written in a non-standard way; real code prior to 0.12 + // would always force the first-class ID to be copied into the + // id attribute before storing. + if attributes == nil { + attributes = make(map[string]string, len(isOld.Attributes)) + } + if idVal := attributes["id"]; idVal == "" { + attributes["id"] = isOld.ID + } + } + + dependencies := make([]string, len(rsOld.Dependencies)) + for i, v := range rsOld.Dependencies { + depStr, err := parseLegacyDependency(v) + if err != nil { + return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err) + } + dependencies[i] = depStr + } + + return &instanceObjectStateV4{ + IndexKey: instKeyRaw, + Status: status, + Deposed: string(deposedKey), + AttributesFlat: attributes, + Dependencies: dependencies, + SchemaVersion: schemaVersion, + PrivateRaw: privateJSON, + }, nil +} + +// parseLegacyResourceAddress parses the different identifier format used +// state formats before version 4, like "instance.name.0". +func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { + var ret addrs.ResourceInstance + + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + ret.Resource.Mode = addrs.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + ret.Resource.Mode = addrs.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + ret.Resource.Type = parts[0] + ret.Resource.Name = parts[1] + ret.Key = addrs.NoKey + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) + } + + ret.Key = addrs.IntKey(idx) + } + + return ret, nil +} + +// simplifyImpliedValueType attempts to heuristically simplify a value type +// derived from a legacy stored output value into something simpler that +// is closer to what would've fitted into the pre-v0.12 value type system. 
+func simplifyImpliedValueType(ty cty.Type) cty.Type { + switch { + case ty.IsTupleType(): + // If all of the element types are the same then we'll make this + // a list instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyTuple) { + // Don't know what the element type would be, then. + return ty + } + + etys := ty.TupleElementTypes() + ety := etys[0] + for _, other := range etys[1:] { + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.List(ety) + + case ty.IsObjectType(): + // If all of the attribute types are the same then we'll make this + // a map instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyObject) { + // Don't know what the element type would be, then. + return ty + } + + atys := ty.AttributeTypes() + var ety cty.Type + for _, other := range atys { + if ety == cty.NilType { + ety = other + continue + } + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.Map(ety) + + default: + // No other normalizations are possible + return ty + } +} + +func parseLegacyDependency(s string) (string, error) { + parts := strings.Split(s, ".") + ret := parts[0] + for _, part := range parts[1:] { + if part == "*" { + break + } + if i, err := strconv.Atoi(part); err == nil { + ret = ret + fmt.Sprintf("[%d]", i) + break + } + ret = ret + "." + part + } + + // The result must parse as a reference, or else we'll create an invalid + // state file. + var diags tfdiags.Diagnostics + _, diags = addrs.ParseRefStr(ret) + if diags.HasErrors() { + return "", diags.Err() + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go new file mode 100644 index 000000000..164b57f82 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/version4.go @@ -0,0 +1,604 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + version "github.com/hashicorp/go-version" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4 := &stateV4{} + err := json.Unmarshal(src, sV4) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var tfVersion *version.Version + if sV4.TerraformVersion != "" { + var err error + tfVersion, err = version.NewVersion(sV4.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid Terraform version string", + fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), + )) + } + } + + file := &File{ + TerraformVersion: tfVersion, + Serial: sV4.Serial, + Lineage: sV4.Lineage, + } + + state := states.NewState() 
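For readers tracing this code, it may help to see the document shape that readStateV4 parses and prepareStateV4 then converts. The following is a hand-written, minimal illustration of a version-4 state file (the resource, provider address, and attribute values are invented), wrapped in a small runnable check that the JSON is well formed:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// exampleV4 mirrors the field names declared by stateV4, resourceStateV4 and
// instanceObjectStateV4 below; all concrete values are illustrative only.
const exampleV4 = `{
  "version": 4,
  "terraform_version": "0.12.29",
  "serial": 1,
  "lineage": "3f8cbe28-0000-0000-0000-000000000000",
  "outputs": {
    "instance_id": {"value": "i-abc123", "type": "string"}
  },
  "resources": [
    {
      "mode": "managed",
      "type": "aws_instance",
      "name": "web",
      "provider": "provider.aws",
      "instances": [
        {
          "index_key": 0,
          "schema_version": 1,
          "attributes": {"id": "i-abc123", "ami": "ami-123456"}
        }
      ]
    }
  ]
}`

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(exampleV4), &doc); err != nil {
		panic(err)
	}
	fmt.Println("state format version:", doc["version"])
}
```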
+ + for _, rsV4 := range sV4.Resources { + rAddr := addrs.Resource{ + Type: rsV4.Type, + Name: rsV4.Name, + } + switch rsV4.Mode { + case "managed": + rAddr.Mode = addrs.ManagedResourceMode + case "data": + rAddr.Mode = addrs.DataResourceMode + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource mode in state", + fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), + )) + continue + } + + moduleAddr := addrs.RootModuleInstance + if rsV4.Module != "" { + var addrDiags tfdiags.Diagnostics + moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + } + + providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) + diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + + var eachMode states.EachMode + switch rsV4.EachMode { + case "": + eachMode = states.NoEach + case "list": + eachMode = states.EachList + case "map": + eachMode = states.EachMap + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource metadata in state", + fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode), + )) + continue + } + + ms := state.EnsureModule(moduleAddr) + + // Ensure the resource container object is present in the state. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + + for _, isV4 := range rsV4.Instances { + keyRaw := isV4.IndexKey + var key addrs.InstanceKey + switch tk := keyRaw.(type) { + case int: + key = addrs.IntKey(tk) + case float64: + // Since JSON only has one number type, reading from encoding/json + // gives us a float64 here even if the number is whole. + // float64 has a smaller integer range than int, but in practice + // we rarely have more than a few tens of instances and so + // it's unlikely that we'll exhaust the 52 bits in a float64. + key = addrs.IntKey(int(tk)) + case string: + key = addrs.StringKey(tk) + default: + if keyRaw != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), + )) + continue + } + key = addrs.NoKey + } + + instAddr := rAddr.Instance(key) + + obj := &states.ResourceInstanceObjectSrc{ + SchemaVersion: isV4.SchemaVersion, + } + + { + // Instance attributes + switch { + case isV4.AttributesRaw != nil: + obj.AttrsJSON = isV4.AttributesRaw + case isV4.AttributesFlat != nil: + obj.AttrsFlat = isV4.AttributesFlat + default: + // This is odd, but we'll accept it and just treat the + // object has being empty. In practice this should arise + // only from the contrived sort of state objects we tend + // to hand-write inline in tests. 
+ obj.AttrsJSON = []byte{'{', '}'} + } + } + + { + // Status + raw := isV4.Status + switch raw { + case "": + obj.Status = states.ObjectReady + case "tainted": + obj.Status = states.ObjectTainted + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), + )) + continue + } + } + + if raw := isV4.PrivateRaw; len(raw) > 0 { + obj.Private = raw + } + + { + depsRaw := isV4.Dependencies + deps := make([]addrs.Referenceable, 0, len(depsRaw)) + for _, depRaw := range depsRaw { + ref, refDiags := addrs.ParseRefStr(depRaw) + diags = diags.Append(refDiags) + if refDiags.HasErrors() { + continue + } + if len(ref.Remaining) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), + )) + } + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) + } + deps = append(deps, ref.Subject) + } + obj.Dependencies = deps + } + + switch { + case isV4.Deposed != "": + dk := states.DeposedKey(isV4.Deposed) + if len(dk) != 8 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), + )) + continue + } + is := ms.ResourceInstance(instAddr) + if is.HasDeposed(dk) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk), + )) + continue + } + + ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) + default: + is := ms.ResourceInstance(instAddr) + if is.HasCurrent() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), + )) + continue + } + + ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) + } + } + + // We repeat this after creating the instances because + // SetResourceInstanceCurrent automatically resets this metadata based + // on the incoming objects. That behavior is useful when we're making + // piecemeal updates to the state during an apply, but when we're + // reading the state file we want to reflect its contents exactly. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + } + + // The root module is special in that we persist its attributes and thus + // need to reload them now. (For descendent modules we just re-calculate + // them based on the latest configuration on each run.) 
+ { + rootModule := state.RootModule() + for name, fos := range sV4.RootOutputs { + os := &states.OutputValue{} + os.Sensitive = fos.Sensitive + + ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value type in state", + fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), + )) + continue + } + + val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value saved in state", + fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), + )) + continue + } + + os.Value = val + rootModule.OutputValues[name] = os + } + } + + file.State = state + return file, diags +} + +func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { + // Here we'll convert back from the "File" representation to our + // stateV4 struct representation and write that. + // + // While we support legacy state formats for reading, we only support the + // latest for writing and so if a V5 is added in future then this function + // should be deleted and replaced with a writeStateV5, even though the + // read/prepare V4 functions above would stick around. + + var diags tfdiags.Diagnostics + if file == nil || file.State == nil { + panic("attempt to write nil state to file") + } + + var terraformVersion string + if file.TerraformVersion != nil { + terraformVersion = file.TerraformVersion.String() + } + + sV4 := &stateV4{ + TerraformVersion: terraformVersion, + Serial: file.Serial, + Lineage: file.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + for name, os := range file.State.RootModule().OutputValues { + src, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err), + )) + continue + } + + typeSrc, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err), + )) + continue + } + + sV4.RootOutputs[name] = outputStateV4{ + Sensitive: os.Sensitive, + ValueRaw: json.RawMessage(src), + ValueTypeRaw: json.RawMessage(typeSrc), + } + } + + for _, ms := range file.State.Modules { + moduleAddr := ms.Addr + for _, rs := range ms.Resources { + resourceAddr := rs.Addr + + var mode string + switch resourceAddr.Mode { + case addrs.ManagedResourceMode: + mode = "managed" + case addrs.DataResourceMode: + mode = "data" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), + )) + continue + } + + var eachMode string + switch rs.EachMode { + case states.NoEach: + eachMode = "" + case states.EachList: + eachMode = "list" + case states.EachMap: + eachMode = "map" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), + )) + continue + } + + sV4.Resources 
= append(sV4.Resources, resourceStateV4{ + Module: moduleAddr.String(), + Mode: mode, + Type: resourceAddr.Type, + Name: resourceAddr.Name, + EachMode: eachMode, + ProviderConfig: rs.ProviderConfig.String(), + Instances: []instanceObjectStateV4{}, + }) + rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) + + for key, is := range rs.Instances { + if is.HasCurrent() { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, is.Current, states.NotDeposed, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + for dk, obj := range is.Deposed { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, obj, dk, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + } + } + } + + sV4.normalize() + + src, err := json.MarshalIndent(sV4, "", " ") + if err != nil { + // Shouldn't happen if we do our conversion to *stateV4 correctly above. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err), + )) + return diags + } + src = append(src, '\n') + + _, err = w.Write(src) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write state", + fmt.Sprintf("An error occured while writing the serialized state: %s.", err), + )) + return diags + } + + return diags +} + +func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var status string + switch obj.Status { + case states.ObjectReady: + status = "" + case states.ObjectTainted: + status = "tainted" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), + )) + } + + var privateRaw []byte + if len(obj.Private) > 0 { + privateRaw = obj.Private + } + + deps := make([]string, len(obj.Dependencies)) + for i, depAddr := range obj.Dependencies { + deps[i] = depAddr.String() + } + + var rawKey interface{} + switch tk := key.(type) { + case addrs.IntKey: + rawKey = int(tk) + case addrs.StringKey: + rawKey = string(tk) + default: + if key != addrs.NoKey { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), + )) + } + } + + return append(isV4s, instanceObjectStateV4{ + IndexKey: rawKey, + Deposed: string(deposed), + Status: status, + SchemaVersion: obj.SchemaVersion, + AttributesFlat: obj.AttrsFlat, + AttributesRaw: obj.AttrsJSON, + PrivateRaw: privateRaw, + Dependencies: deps, + }), diags +} + +type stateV4 struct { + Version stateVersionV4 `json:"version"` + TerraformVersion string `json:"terraform_version"` + Serial uint64 `json:"serial"` + Lineage string `json:"lineage"` + RootOutputs map[string]outputStateV4 `json:"outputs"` + Resources []resourceStateV4 `json:"resources"` +} + +// normalize makes some in-place changes to normalize the way items are +// stored to ensure that two functionally-equivalent states will be stored +// identically. 
+func (s *stateV4) normalize() { + sort.Stable(sortResourcesV4(s.Resources)) + for _, rs := range s.Resources { + sort.Stable(sortInstancesV4(rs.Instances)) + } +} + +type outputStateV4 struct { + ValueRaw json.RawMessage `json:"value"` + ValueTypeRaw json.RawMessage `json:"type"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type resourceStateV4 struct { + Module string `json:"module,omitempty"` + Mode string `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + EachMode string `json:"each,omitempty"` + ProviderConfig string `json:"provider"` + Instances []instanceObjectStateV4 `json:"instances"` +} + +type instanceObjectStateV4 struct { + IndexKey interface{} `json:"index_key,omitempty"` + Status string `json:"status,omitempty"` + Deposed string `json:"deposed,omitempty"` + + SchemaVersion uint64 `json:"schema_version"` + AttributesRaw json.RawMessage `json:"attributes,omitempty"` + AttributesFlat map[string]string `json:"attributes_flat,omitempty"` + + PrivateRaw []byte `json:"private,omitempty"` + + Dependencies []string `json:"depends_on,omitempty"` +} + +// stateVersionV4 is a weird special type we use to produce our hard-coded +// "version": 4 in the JSON serialization. +type stateVersionV4 struct{} + +func (sv stateVersionV4) MarshalJSON() ([]byte, error) { + return []byte{'4'}, nil +} + +func (sv stateVersionV4) UnmarshalJSON([]byte) error { + // Nothing to do: we already know we're version 4 + return nil +} + +type sortResourcesV4 []resourceStateV4 + +func (sr sortResourcesV4) Len() int { return len(sr) } +func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } +func (sr sortResourcesV4) Less(i, j int) bool { + switch { + case sr[i].Mode != sr[j].Mode: + return sr[i].Mode < sr[j].Mode + case sr[i].Type != sr[j].Type: + return sr[i].Type < sr[j].Type + case sr[i].Name != sr[j].Name: + return sr[i].Name < sr[j].Name + default: + return false + } +} + +type sortInstancesV4 []instanceObjectStateV4 + +func (si sortInstancesV4) Len() int { return len(si) } +func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } +func (si sortInstancesV4) Less(i, j int) bool { + ki := si[i].IndexKey + kj := si[j].IndexKey + if ki != kj { + if (ki == nil) != (kj == nil) { + return ki == nil + } + if kii, isInt := ki.(int); isInt { + if kji, isInt := kj.(int); isInt { + return kii < kji + } + return true + } + if kis, isStr := ki.(string); isStr { + if kjs, isStr := kj.(string); isStr { + return kis < kjs + } + return true + } + } + if si[i].Deposed != si[j].Deposed { + return si[i].Deposed < si[j].Deposed + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go new file mode 100644 index 000000000..8fdca4580 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile/write.go @@ -0,0 +1,17 @@ +package statefile + +import ( + "io" + + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// Write writes the given state to the given writer in the current state +// serialization format. +func Write(s *File, w io.Writer) error { + // Always record the current terraform version in the state. 
+ s.TerraformVersion = tfversion.SemVer + + diags := writeStateV4(s, w) + return diags.Err() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go new file mode 100644 index 000000000..6d2361254 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/states/sync.go @@ -0,0 +1,484 @@ +package states + +import ( + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// SyncState is a wrapper around State that provides concurrency-safe access to +// various common operations that occur during a Terraform graph walk, or other +// similar concurrent contexts. +// +// When a SyncState wrapper is in use, no concurrent direct access to the +// underlying objects is permitted unless the caller first acquires an explicit +// lock, using the Lock and Unlock methods. Most callers should _not_ +// explicitly lock, and should instead use the other methods of this type that +// handle locking automatically. +// +// Since SyncState is able to safely consolidate multiple updates into a single +// atomic operation, many of its methods are at a higher level than those +// of the underlying types, and operate on the state as a whole rather than +// on individual sub-structures of the state. +// +// SyncState can only protect against races within its own methods. It cannot +// provide any guarantees about the order in which concurrent operations will +// be processed, so callers may still need to employ higher-level techniques +// for ensuring correct operation sequencing, such as building and walking +// a dependency graph. +type SyncState struct { + state *State + lock sync.RWMutex +} + +// Module returns a snapshot of the state of the module instance with the given +// address, or nil if no such module is tracked. +// +// The return value is a pointer to a copy of the module state, which the +// caller may then freely access and mutate. However, since the module state +// tends to be a large data structure with many child objects, where possible +// callers should prefer to use a more granular accessor to access a child +// module directly, and thus reduce the amount of copying required. +func (s *SyncState) Module(addr addrs.ModuleInstance) *Module { + s.lock.RLock() + ret := s.state.Module(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// OutputValue returns a snapshot of the state of the output value with the +// given address, or nil if no such output value is tracked. +// +// The return value is a pointer to a copy of the output value state, which the +// caller may then freely access and mutate. +func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + s.lock.RLock() + ret := s.state.OutputValue(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// SetOutputValue writes a given output value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the output is not yet tracked in state then it +// be added as a side-effect. +func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetOutputValue(addr.OutputValue.Name, value, sensitive) +} + +// RemoveOutputValue removes the stored value for the output value with the +// given address. 
+// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveOutputValue(addr.OutputValue.Name) + s.maybePruneModule(addr.Module) +} + +// LocalValue returns the current value associated with the given local value +// address. +func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value { + s.lock.RLock() + // cty.Value is immutable, so we don't need any extra copying here. + ret := s.state.LocalValue(addr) + s.lock.RUnlock() + return ret +} + +// SetLocalValue writes a given output value into the state, overwriting +// any existing value of the same name. +// +// If the module containing the local value is not yet tracked in state then it +// will be added as a side-effect. +func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetLocalValue(addr.LocalValue.Name, value) +} + +// RemoveLocalValue removes the stored value for the local value with the +// given address. +// +// If this results in its containing module being empty, the module will be +// pruned from the state as a side-effect. +func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return + } + ms.RemoveLocalValue(addr.LocalValue.Name) + s.maybePruneModule(addr.Module) +} + +// Resource returns a snapshot of the state of the resource with the given +// address, or nil if no such resource is tracked. +// +// The return value is a pointer to a copy of the resource state, which the +// caller may then freely access and mutate. +func (s *SyncState) Resource(addr addrs.AbsResource) *Resource { + s.lock.RLock() + ret := s.state.Resource(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstance returns a snapshot of the state the resource instance with +// the given address, or nil if no such instance is tracked. +// +// The return value is a pointer to a copy of the instance state, which the +// caller may then freely access and mutate. +func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + s.lock.RLock() + ret := s.state.ResourceInstance(addr).DeepCopy() + s.lock.RUnlock() + return ret +} + +// ResourceInstanceObject returns a snapshot of the current instance object +// of the given generation belonging to the instance with the given address, +// or nil if no such object is tracked.. +// +// The return value is a pointer to a copy of the object, which the caller may +// then freely access and mutate. +func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc { + s.lock.RLock() + defer s.lock.RUnlock() + + inst := s.state.ResourceInstance(addr) + if inst == nil { + return nil + } + return inst.GetGeneration(gen).DeepCopy() +} + +// SetResourceMeta updates the resource-level metadata for the resource at +// the given address, creating the containing module state and resource state +// as a side-effect if not already present. 
+func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceMeta(addr.Resource, eachMode, provider) +} + +// RemoveResourceIfEmpty is similar to RemoveResource but first checks to +// make sure there are no instances or objects left in the resource. +// +// Returns true if the resource was removed, or false if remaining child +// objects prevented its removal. Returns true also if the resource was +// already absent, and thus no action needed to be taken. +func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return true // nothing to do + } + rs := ms.Resource(addr.Resource) + if rs == nil { + return true // nothing to do + } + if len(rs.Instances) != 0 { + // We don't check here for the possibility of instances that exist + // but don't have any objects because it's the responsibility of the + // instance-mutation methods to prune those away automatically. + return false + } + ms.RemoveResource(addr.Resource) + s.maybePruneModule(addr.Module) + return true +} + +// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a +// resource has changed from having "count" set to not set, or vice-versa, and +// so we need to rename the zeroth instance key to no key at all, or vice-versa. +// +// Set countEnabled to true if the resource has count set in its new +// configuration, or false if it does not. +// +// The state is modified in-place if necessary, moving a resource instance +// between the two addresses. The return value is true if a change was made, +// and false otherwise. +func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return false + } + + relAddr := addr.Resource + rs := ms.Resource(relAddr) + if rs == nil { + return false + } + huntKey := addrs.NoKey + replaceKey := addrs.InstanceKey(addrs.IntKey(0)) + if !countEnabled { + huntKey, replaceKey = replaceKey, huntKey + } + + is, exists := rs.Instances[huntKey] + if !exists { + return false + } + + if _, exists := rs.Instances[replaceKey]; exists { + // If the replacement key also exists then we'll do nothing and keep both. + return false + } + + // If we get here then we need to "rename" from hunt to replace + rs.Instances[replaceKey] = is + delete(rs.Instances, huntKey) + return true +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance as a whole will be removed, which +// may in turn also remove the containing module if it becomes empty. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. 
+// +// The provider address and "each mode" are resource-wide settings and so they +// are updated for all other instances of the same resource as a side-effect of +// this call. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// DeposeResourceInstanceObject moves the current instance object for the +// given resource instance address into the deposed set, leaving the instance +// without a current object. +// +// The return value is the newly-allocated deposed key, or NotDeposed if the +// given instance is already lacking a current object. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then there cannot be a current object for the +// given instance, and so NotDeposed will be returned without modifying the +// state at all. +func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return NotDeposed + } + + return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) +} + +// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject +// but uses a pre-allocated key. It's the caller's responsibility to ensure +// that there aren't any races to use a particular key; this method will panic +// if the given key is already in use. 
+func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if forcedKey == NotDeposed {
+		// Usage error: should use DeposeResourceInstanceObject in this case
+		panic("DeposeResourceInstanceObjectForceKey called without forced key")
+	}
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return // Nothing to do, since there can't be any current object either.
+	}
+
+	ms.deposeResourceInstanceObject(addr.Resource, forcedKey)
+}
+
+// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the
+// given key on the specified resource as the current object for that instance
+// if and only if that would not cause us to forget an existing current
+// object for that instance.
+//
+// Returns true if the object was restored to current, or false if no change
+// was made at all.
+func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if key == NotDeposed {
+		panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
+	}
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		// Nothing to do, since the specified deposed object cannot exist.
+		return false
+	}
+
+	return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
+}
+
+// RemovePlannedResourceInstanceObjects removes from the state any resource
+// instance objects that have the status ObjectPlanned, indicating that they
+// are just transient placeholders created during planning.
+//
+// Note that this does not restore any "ready" or "tainted" object that might
+// have been present before the planned object was written. The only real use
+// for this method is in preparing the state created during a refresh walk,
+// where we run the planning step for certain instances just to create enough
+// information to allow correct expression evaluation within provider and
+// data resource blocks. Discarding planned instances in that case is okay
+// because the refresh phase only creates planned objects to stand in for
+// objects that don't exist yet, and thus the planned object must have been
+// absent before by definition.
+func (s *SyncState) RemovePlannedResourceInstanceObjects() {
+	// TODO: Merge together the refresh and plan phases into a single walk,
+	// so we can remove the need to create this "partial plan" during refresh
+	// that we then need to clean up before proceeding.
+
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	for _, ms := range s.state.Modules {
+		moduleAddr := ms.Addr
+
+		for _, rs := range ms.Resources {
+			resAddr := rs.Addr
+
+			for ik, is := range rs.Instances {
+				instAddr := resAddr.Instance(ik)
+
+				if is.Current != nil && is.Current.Status == ObjectPlanned {
+					// Setting the current instance to nil removes it from the
+					// state altogether if there are not also deposed instances.
+					ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig)
+				}
+
+				for dk, obj := range is.Deposed {
+					// Deposed objects should never be "planned", but we'll
+					// do this anyway for the sake of completeness.
+					if obj.Status == ObjectPlanned {
+						ms.ForgetResourceInstanceDeposed(instAddr, dk)
+					}
+				}
+			}
+		}
+
+		// We may have deleted some objects, which means that we may have
+		// left a module empty, and so we must prune to preserve the invariant
+		// that only the root module is allowed to be empty.
+		s.maybePruneModule(moduleAddr)
+	}
+}
+
+// Lock acquires an explicit lock on the state, allowing direct read and write
+// access to the returned state object. The caller must call Unlock once
+// access is no longer needed, and then immediately discard the state
+// pointer.
+//
+// Most callers should not use this. Instead, use the concurrency-safe
+// accessors and mutators provided directly on SyncState.
+func (s *SyncState) Lock() *State {
+	s.lock.Lock()
+	return s.state
+}
+
+// Unlock releases a lock previously acquired by Lock, at which point the
+// caller must cease all use of the state pointer that was returned.
+//
+// Do not call this method except to end an explicit lock acquired by
+// Lock. If a caller calls Unlock without first holding the lock, behavior
+// is undefined.
+func (s *SyncState) Unlock() {
+	s.lock.Unlock()
+}
+
+// maybePruneModule will remove a module from the state altogether if it is
+// empty, unless it's the root module which must always be present.
+//
+// This helper method is not concurrency-safe on its own, so must only be
+// called while the caller is already holding the lock for writing.
+func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) {
+	if addr.IsRoot() {
+		// We never prune the root.
+		return
+	}
+
+	ms := s.state.Module(addr)
+	if ms == nil {
+		return
+	}
+
+	if ms.empty() {
+		log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr)
+		s.state.RemoveModule(addr)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go
new file mode 100644
index 000000000..8e41f46ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/config_traversals.go
@@ -0,0 +1,68 @@
+package tfdiags
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// FormatCtyPath is a helper function to produce a user-friendly string
+// representation of a cty.Path. The result uses a syntax similar to the
+// HCL expression language in the hope of it being familiar to users.
+func FormatCtyPath(path cty.Path) string {
+	var buf bytes.Buffer
+	for _, step := range path {
+		switch ts := step.(type) {
+		case cty.GetAttrStep:
+			fmt.Fprintf(&buf, ".%s", ts.Name)
+		case cty.IndexStep:
+			buf.WriteByte('[')
+			key := ts.Key
+			keyTy := key.Type()
+			switch {
+			case key.IsNull():
+				buf.WriteString("null")
+			case !key.IsKnown():
+				buf.WriteString("(not yet known)")
+			case keyTy == cty.Number:
+				bf := key.AsBigFloat()
+				buf.WriteString(bf.Text('g', -1))
+			case keyTy == cty.String:
+				buf.WriteString(strconv.Quote(key.AsString()))
+			default:
+				buf.WriteString("...")
+			}
+			buf.WriteByte(']')
+		}
+	}
+	return buf.String()
+}
+
+// FormatError is a helper function to produce a user-friendly string
+// representation of certain special error types that we might want to
+// include in diagnostic messages.
+//
+// This currently has special behavior only for cty.PathError, where a
+// non-empty path is rendered in an HCL-like syntax as context.
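+//
+// For example (illustrative only), a cty.PathError whose Path is
+// cty.GetAttrPath("instance").Index(cty.NumberIntVal(0)) is rendered as
+// ".instance[0]: <underlying error message>" rather than just the bare
+// message.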
+func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} + +// FormatErrorPrefixed is like FormatError except that it presents any path +// information after the given prefix string, which is assumed to contain +// an HCL syntax representation of the value that errors are relative to. +func FormatErrorPrefixed(err error, prefix string) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return fmt.Sprintf("%s: %s", prefix, err.Error()) + } + + return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go new file mode 100644 index 000000000..59c06b70b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/contextual.go @@ -0,0 +1,372 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// The "contextual" family of diagnostics are designed to allow separating +// the detection of a problem from placing that problem in context. For +// example, some code that is validating an object extracted from configuration +// may not have access to the configuration that generated it, but can still +// report problems within that object which the caller can then place in +// context by calling IsConfigBody on the returned diagnostics. +// +// When contextual diagnostics are used, the documentation for a method must +// be very explicit about what context is implied for any diagnostics returned, +// to help ensure the expected result. + +// contextualFromConfig is an interface type implemented by diagnostic types +// that can elaborate themselves when given information about the configuration +// body they are embedded in. +// +// Usually this entails extracting source location information in order to +// populate the "Subject" range. +type contextualFromConfigBody interface { + ElaborateFromConfigBody(hcl.Body) Diagnostic +} + +// InConfigBody returns a copy of the receiver with any config-contextual +// diagnostics elaborated in the context of the given body. +func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics { + if len(d) == 0 { + return nil + } + + ret := make(Diagnostics, len(d)) + for i, srcDiag := range d { + if cd, isCD := srcDiag.(contextualFromConfigBody); isCD { + ret[i] = cd.ElaborateFromConfigBody(body) + } else { + ret[i] = srcDiag + } + } + + return ret +} + +// AttributeValue returns a diagnostic about an attribute value in an implied current +// configuration context. This should be returned only from functions whose +// interface specifies a clear configuration context that this will be +// resolved in. +// +// The given path is relative to the implied configuration context. To describe +// a top-level attribute, it should be a single-element cty.Path with a +// cty.GetAttrStep. It's assumed that the path is returning into a structure +// that would be produced by our conventions in the configschema package; it +// may return unexpected results for structures that can't be represented by +// configschema. +// +// Since mapping attribute paths back onto configuration is an imprecise +// operation (e.g. 
dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. In +// some cases however, we may want to know just the name of the attribute that +// generated the Diagnostic message. +// This returns a nil cty.Path if it does not exist in the Diagnostic. +func GetAttribute(d Diagnostic) cty.Path { + if d, ok := d.(*attributeDiagnostic); ok { + return d.attrPath + } + return nil +} + +type attributeDiagnostic struct { + diagnosticBase + attrPath cty.Path + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +// ElaborateFromConfigBody finds the most accurate possible source location +// for a diagnostic's attribute path within the given body. +// +// Backing out from a path back to a source location is not always entirely +// possible because we lose some information in the decoding process, so +// if an exact position cannot be found then the returned diagnostic will +// refer to a position somewhere within the containing body, which is assumed +// to be better than no location at all. +// +// If possible it is generally better to report an error at a layer where +// source location information is still available, for more accuracy. This +// is not always possible due to system architecture, so this serves as a +// "best effort" fallback behavior for such situations. +func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if len(d.attrPath) < 1 { + // Should never happen, but we'll allow it rather than crashing. + return d + } + + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + + // This function will often end up re-decoding values that were already + // decoded by an earlier step. This is non-ideal but is architecturally + // more convenient than arranging for source location information to be + // propagated to every place in Terraform, and this happens only in the + // presence of errors where performance isn't a concern. 
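+
+	// As a concrete illustration (not part of the original source), an
+	// attrPath of
+	//     cty.GetAttrPath("network_interface").Index(cty.NumberIntVal(0)).GetAttr("subnet_id")
+	// traverses into the first "network_interface" block found below and then
+	// resolves the subject range to that block's "subnet_id" attribute.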
+ + traverse := d.attrPath[:] + final := d.attrPath[len(d.attrPath)-1] + + // Index should never be the first step + // as indexing of top blocks (such as resources & data sources) + // is handled elsewhere + if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + return &ret + } + + // Process index separately + idxStep, hasIdx := final.(cty.IndexStep) + if hasIdx { + final = d.attrPath[len(d.attrPath)-2] + traverse = d.attrPath[:len(d.attrPath)-1] + } + + // If we have more than one step after removing index + // then we'll first try to traverse to a child body + // corresponding to the requested path. + if len(traverse) > 1 { + body = traversePathSteps(traverse, body) + } + + // Default is to indicate a missing item in the deepest body we reached + // while traversing. + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + + // Once we get here, "final" should be a GetAttr step that maps to an + // attribute in our current body. + finalStep, isAttr := final.(cty.GetAttrStep) + if !isAttr { + return &ret + } + + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: finalStep.Name, + Required: true, + }, + }, + }) + if contentDiags.HasErrors() { + return &ret + } + + if attr, ok := content.Attributes[finalStep.Name]; ok { + hclRange := attr.Expr.Range() + if hasIdx { + // Try to be more precise by finding index range + hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) + } + subject = SourceRangeFromHCL(hclRange) + ret.subject = &subject + } + + return &ret +} + +func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { + for i := 0; i < len(traverse); i++ { + step := traverse[i] + + switch tStep := step.(type) { + case cty.GetAttrStep: + + var next cty.PathStep + if i < (len(traverse) - 1) { + next = traverse[i+1] + } + + // Will be indexing into our result here? + var indexType cty.Type + var indexVal cty.Value + if nextIndex, ok := next.(cty.IndexStep); ok { + indexVal = nextIndex.Key + indexType = indexVal.Type() + i++ // skip over the index on subsequent iterations + } + + var blockLabelNames []string + if indexType == cty.String { + // Map traversal means we expect one label for the key. + blockLabelNames = []string{"key"} + } + + // For intermediate steps we expect to be referring to a child + // block, so we'll attempt decoding under that assumption. 
+ content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: tStep.Name, + LabelNames: blockLabelNames, + }, + }, + }) + if contentDiags.HasErrors() { + return body + } + filtered := make([]*hcl.Block, 0, len(content.Blocks)) + for _, block := range content.Blocks { + if block.Type == tStep.Name { + filtered = append(filtered, block) + } + } + if len(filtered) == 0 { + // Step doesn't refer to a block + continue + } + + switch indexType { + case cty.NilType: // no index at all + if len(filtered) != 1 { + return body + } + body = filtered[0].Body + case cty.Number: + var idx int + err := gocty.FromCtyValue(indexVal, &idx) + if err != nil || idx >= len(filtered) { + return body + } + body = filtered[idx].Body + case cty.String: + key := indexVal.AsString() + var block *hcl.Block + for _, candidate := range filtered { + if candidate.Labels[0] == key { + block = candidate + break + } + } + if block == nil { + // No block with this key, so we'll just indicate a + // missing item in the containing block. + return body + } + body = block.Body + default: + // Should never happen, because only string and numeric indices + // are supported by cty collections. + return body + } + + default: + // For any other kind of step, we'll just return our current body + // as the subject and accept that this is a little inaccurate. + return body + } + } + return body +} + +func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { + switch idxStep.Key.Type() { + case cty.Number: + var idx int + err := gocty.FromCtyValue(idxStep.Key, &idx) + items, diags := hcl.ExprList(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + if err != nil || idx >= len(items) { + return attr.NameRange + } + return items[idx].Range() + case cty.String: + pairs, diags := hcl.ExprMap(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + stepKey := idxStep.Key.AsString() + for _, kvPair := range pairs { + key, err := kvPair.Key.Value(nil) + if err != nil { + return attr.Expr.Range() + } + if key.AsString() == stepKey { + startRng := kvPair.Value.StartRange() + return startRng + } + } + return attr.NameRange + } + return attr.Expr.Range() +} + +func (d *attributeDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} + +// WholeContainingBody returns a diagnostic about the body that is an implied +// current configuration context. This should be returned only from +// functions whose interface specifies a clear configuration context that this +// will be resolved in. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is currently the missing item +// range of the body. In future, this may change to some other suitable +// part of the containing body. +func WholeContainingBody(severity Severity, summary, detail string) Diagnostic { + return &wholeBodyDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + } +} + +type wholeBodyDiagnostic struct { + diagnosticBase + subject *SourceRange // populated only after ElaborateFromConfigBody +} + +func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. 
+ return d + } + + ret := *d + rng := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &rng + return &ret +} + +func (d *wholeBodyDiagnostic) Source() Source { + return Source{ + Subject: d.subject, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go new file mode 100644 index 000000000..a7699cf01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic.go @@ -0,0 +1,40 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl/v2" +) + +type Diagnostic interface { + Severity() Severity + Description() Description + Source() Source + + // FromExpr returns the expression-related context for the diagnostic, if + // available. Returns nil if the diagnostic is not related to an + // expression evaluation. + FromExpr() *FromExpr +} + +type Severity rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} + +type Source struct { + Subject *SourceRange + Context *SourceRange +} + +type FromExpr struct { + Expression hcl.Expression + EvalContext *hcl.EvalContext +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go new file mode 100644 index 000000000..50bf9d8eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostic_base.go @@ -0,0 +1,31 @@ +package tfdiags + +// diagnosticBase can be embedded in other diagnostic structs to get +// default implementations of Severity and Description. This type also +// has default implementations of Source and FromExpr that return no source +// location or expression-related information, so embedders should generally +// override those method to return more useful results where possible. +type diagnosticBase struct { + severity Severity + summary string + detail string +} + +func (d diagnosticBase) Severity() Severity { + return d.severity +} + +func (d diagnosticBase) Description() Description { + return Description{ + Summary: d.summary, + Detail: d.detail, + } +} + +func (d diagnosticBase) Source() Source { + return Source{} +} + +func (d diagnosticBase) FromExpr() *FromExpr { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go new file mode 100644 index 000000000..a19fa80c4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/diagnostics.go @@ -0,0 +1,318 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "path/filepath" + "sort" + "strings" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/v2" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. 
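+//
+// A minimal usage sketch; the tfdiags import name and the doSomething
+// helper (any call returning error) are assumptions for illustration:
+//
+//     var diags tfdiags.Diagnostics
+//     diags = diags.Append(doSomething())
+//     if diags.HasErrors() {
+//         return diags.Err()
+//     }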
+type Diagnostics []Diagnostic + +// Append is the main interface for constructing Diagnostics lists, taking +// an existing list (which may be nil) and appending the new objects to it +// after normalizing them to be implementations of Diagnostic. +// +// The usual pattern for a function that natively "speaks" diagnostics is: +// +// // Create a nil Diagnostics at the start of the function +// var diags diag.Diagnostics +// +// // At later points, build on it if errors / warnings occur: +// foo, err := DoSomethingRisky() +// if err != nil { +// diags = diags.Append(err) +// } +// +// // Eventually return the result and diagnostics in place of error +// return result, diags +// +// Append accepts a variety of different diagnostic-like types, including +// native Go errors and HCL diagnostics. It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) // flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case NonFatalError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. 
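+//
+// A sketch of the intended use, assuming an encoding/gob encoder writing to
+// some io.Writer named w:
+//
+//     enc := gob.NewEncoder(w)
+//     err := enc.Encode(diags.ForRPC())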
+func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. +// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. 
+// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + iSrc, jSrc := iD.Source(), jD.Source() + + switch { + + case iSev != jSev: + return iSev == Warning + + case (iSrc.Subject == nil) != (jSrc.Subject == nil): + return iSrc.Subject == nil + + case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: + iSubj := iSrc.Subject + jSubj := jSrc.Subject + switch { + case iSubj.Filename != jSubj.Filename: + // Path with fewer segments goes first if they are different lengths + sep := string(filepath.Separator) + iCount := strings.Count(iSubj.Filename, sep) + jCount := strings.Count(jSubj.Filename, sep) + if iCount != jCount { + return iCount < jCount + } + return iSubj.Filename < jSubj.Filename + case iSubj.Start.Byte != jSubj.Start.Byte: + return iSubj.Start.Byte < jSubj.Start.Byte + case iSubj.End.Byte != jSubj.End.Byte: + return iSubj.End.Byte < jSubj.End.Byte + } + fallthrough + + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go new file mode 100644 index 000000000..c427879eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. 
As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. +package tfdiags diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go new file mode 100644 index 000000000..13f7a714f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/error.go @@ -0,0 +1,28 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func (e nativeError) Source() Source { + // No source information available for a native error + return Source{} +} + +func (e nativeError) FromExpr() *FromExpr { + // Native errors are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go new file mode 100644 index 000000000..8c781611a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/hcl.go @@ -0,0 +1,87 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl/v2" +) + +// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +func (d hclDiagnostic) FromExpr() *FromExpr { + if d.diag.Expression == nil || d.diag.EvalContext == nil { + return nil + } + return &FromExpr{ + Expression: d.diag.Expression, + EvalContext: d.diag.EvalContext, + } +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs a HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. 
+func (r SourceRange) ToHCL() hcl.Range { + return hcl.Range{ + Filename: r.Filename, + Start: hcl.Pos{ + Line: r.Start.Line, + Column: r.Start.Column, + Byte: r.Start.Byte, + }, + End: hcl.Pos{ + Line: r.End.Line, + Column: r.End.Column, + Byte: r.End.Byte, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go new file mode 100644 index 000000000..485063b0c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/rpc_friendly.go @@ -0,0 +1,59 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string + Subject_ *SourceRange + Context_ *SourceRange +} + +// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to +// RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. +func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + source := diag.Source() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + Subject_: source.Subject, + Context_: source.Context, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func (d *rpcFriendlyDiag) Source() Source { + return Source{ + Subject: d.Subject_, + Context: d.Context_, + } +} + +func (d rpcFriendlyDiag) FromExpr() *FromExpr { + // RPC-friendly diagnostics cannot preserve expression information because + // expressions themselves are not RPC-friendly. + return nil +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go new file mode 100644 index 000000000..78a721068 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/severity_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go new file mode 100644 index 000000000..b0f1ecd46 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/simple_warning.go @@ -0,0 +1,30 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. 
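+//
+// For example (sketch; diags is an existing Diagnostics value):
+//
+//     diags = diags.Append(SimpleWarning("this field is deprecated"))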
+func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a simple warning + return Source{} +} + +func (e simpleWarning) FromExpr() *FromExpr { + // Simple warnings are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go new file mode 100644 index 000000000..3031168d6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/source_range.go @@ -0,0 +1,35 @@ +package tfdiags + +import ( + "fmt" + "os" + "path/filepath" +) + +type SourceRange struct { + Filename string + Start, End SourcePos +} + +type SourcePos struct { + Line, Column, Byte int +} + +// StartString returns a string representation of the start of the range, +// including the filename and the line and column numbers. +func (r SourceRange) StartString() string { + filename := r.Filename + + // We'll try to relative-ize our filename here so it's less verbose + // in the common case of being in the current working directory. If not, + // we'll just show the full path. + wd, err := os.Getwd() + if err == nil { + relFn, err := filepath.Rel(wd, filename) + if err == nil { + filename = relFn + } + } + + return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go new file mode 100644 index 000000000..eaa27373d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags/sourceless.go @@ -0,0 +1,13 @@ +package tfdiags + +// Sourceless creates and returns a diagnostic with no source location +// information. This is generally used for operational-type errors that are +// caused by or relate to the environment where Terraform is running rather +// than to the provided configuration. +func Sourceless(severity Severity, summary, detail string) Diagnostic { + return diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh new file mode 100644 index 000000000..de1d693ca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/generate.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# We do not run protoc under go:generate because we want to ensure that all +# dependencies of go:generate are "go get"-able for general dev environment +# usability. To compile all protobuf files in this repository, run +# "make protobuf" at the top-level. 
+ +set -eu + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +cd "$DIR" + +protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 000000000..86fd21e41 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,3518 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tfplugin5.proto + +package tfplugin5 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +var Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", +} + +var Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, +} + +func (x Diagnostic_Severity) String() string { + return proto.EnumName(Diagnostic_Severity_name, int32(x)) +} + +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +var Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", +} + +var Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) +} + +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{0} +} + +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (m *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(m, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +func (m *DynamicValue) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +type Diagnostic struct { + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1} +} + +func (m *Diagnostic) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Diagnostic.Unmarshal(m, b) +} +func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) +} +func (m *Diagnostic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Diagnostic.Merge(m, src) +} +func (m *Diagnostic) XXX_Size() int { + return xxx_messageInfo_Diagnostic.Size(m) +} +func (m *Diagnostic) XXX_DiscardUnknown() { + xxx_messageInfo_Diagnostic.DiscardUnknown(m) +} + +var xxx_messageInfo_Diagnostic proto.InternalMessageInfo + +func (m *Diagnostic) GetSeverity() Diagnostic_Severity { + if m != nil { + return m.Severity + } + return Diagnostic_INVALID +} + +func (m *Diagnostic) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Diagnostic) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Diagnostic) GetAttribute() *AttributePath { + if m != nil { + return m.Attribute + } + return nil +} + +type AttributePath struct { + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*AttributePath) Reset() { *m = AttributePath{} } +func (m *AttributePath) String() string { return proto.CompactTextString(m) } +func (*AttributePath) ProtoMessage() {} +func (*AttributePath) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2} +} + +func (m *AttributePath) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath.Unmarshal(m, b) +} +func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) +} +func (m *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(m, src) +} +func (m *AttributePath) XXX_Size() int { + return xxx_messageInfo_AttributePath.Size(m) +} +func (m *AttributePath) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath proto.InternalMessageInfo + +func (m *AttributePath) GetSteps() []*AttributePath_Step { + if m != nil { + return m.Steps + } + return nil +} + +type AttributePath_Step struct { + // Types that are valid to be assigned to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } +func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } +func (*AttributePath_Step) ProtoMessage() {} +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2, 0} +} + +func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) +} +func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) +} +func (m *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(m, src) +} +func (m *AttributePath_Step) XXX_Size() int { + return xxx_messageInfo_AttributePath_Step.Size(m) +} +func (m *AttributePath_Step) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *AttributePath_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (m 
*AttributePath_Step) GetElementKeyString() string { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (m *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributePath_Step) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } +} + +type Stop struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop) Reset() { *m = Stop{} } +func (m *Stop) String() string { return proto.CompactTextString(m) } +func (*Stop) ProtoMessage() {} +func (*Stop) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3} +} + +func (m *Stop) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop.Unmarshal(m, b) +} +func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop.Marshal(b, m, deterministic) +} +func (m *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(m, src) +} +func (m *Stop) XXX_Size() int { + return xxx_messageInfo_Stop.Size(m) +} +func (m *Stop) XXX_DiscardUnknown() { + xxx_messageInfo_Stop.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop proto.InternalMessageInfo + +type Stop_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Request) Reset() { *m = Stop_Request{} } +func (m *Stop_Request) String() string { return proto.CompactTextString(m) } +func (*Stop_Request) ProtoMessage() {} +func (*Stop_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 0} +} + +func (m *Stop_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Request.Unmarshal(m, b) +} +func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) +} +func (m *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(m, src) +} +func (m *Stop_Request) XXX_Size() int { + return xxx_messageInfo_Stop_Request.Size(m) +} +func (m *Stop_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Request proto.InternalMessageInfo + +type Stop_Response struct { + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Response) Reset() { *m = Stop_Response{} } +func (m *Stop_Response) String() string { return proto.CompactTextString(m) } +func (*Stop_Response) ProtoMessage() {} +func (*Stop_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 1} +} + +func (m *Stop_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Response.Unmarshal(m, b) +} +func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) +} +func (m *Stop_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Response.Merge(m, src) +} +func (m *Stop_Response) XXX_Size() 
int { + return xxx_messageInfo_Stop_Response.Size(m) +} +func (m *Stop_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Response proto.InternalMessageInfo + +func (m *Stop_Response) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +type RawState struct { + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawState) Reset() { *m = RawState{} } +func (m *RawState) String() string { return proto.CompactTextString(m) } +func (*RawState) ProtoMessage() {} +func (*RawState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{4} +} + +func (m *RawState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawState.Unmarshal(m, b) +} +func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawState.Marshal(b, m, deterministic) +} +func (m *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(m, src) +} +func (m *RawState) XXX_Size() int { + return xxx_messageInfo_RawState.Size(m) +} +func (m *RawState) XXX_DiscardUnknown() { + xxx_messageInfo_RawState.DiscardUnknown(m) +} + +var xxx_messageInfo_RawState proto.InternalMessageInfo + +func (m *RawState) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +func (m *RawState) GetFlatmap() map[string]string { + if m != nil { + return m.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +type Schema_Block struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Block) Reset() { *m = Schema_Block{} } +func (m *Schema_Block) String() string { return proto.CompactTextString(m) } +func (*Schema_Block) ProtoMessage() {} +func (*Schema_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 0} +} + +func (m *Schema_Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Block.Unmarshal(m, b) +} +func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) +} +func (m *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(m, src) +} +func (m *Schema_Block) XXX_Size() int { + return xxx_messageInfo_Schema_Block.Size(m) +} +func (m *Schema_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Block proto.InternalMessageInfo + +func (m *Schema_Block) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema_Block) GetAttributes() []*Schema_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if m != nil { + return m.BlockTypes + } + return nil +} + +type Schema_Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" 
json:"sensitive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } +func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } +func (*Schema_Attribute) ProtoMessage() {} +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 1} +} + +func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) +} +func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) +} +func (m *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(m, src) +} +func (m *Schema_Attribute) XXX_Size() int { + return xxx_messageInfo_Schema_Attribute.Size(m) +} +func (m *Schema_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo + +func (m *Schema_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schema_Attribute) GetType() []byte { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema_Attribute) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Attribute) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Schema_Attribute) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *Schema_Attribute) GetComputed() bool { + if m != nil { + return m.Computed + } + return false +} + +func (m *Schema_Attribute) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +type Schema_NestedBlock struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } +func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } +func (*Schema_NestedBlock) ProtoMessage() {} +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2} +} + +func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) +} +func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) +} +func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(m, src) +} +func (m *Schema_NestedBlock) XXX_Size() int { + return xxx_messageInfo_Schema_NestedBlock.Size(m) +} +func (m *Schema_NestedBlock) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo + +func (m *Schema_NestedBlock) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *Schema_NestedBlock) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if m != nil { + return m.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (m *Schema_NestedBlock) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema_NestedBlock) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +type GetProviderSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } +func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema) ProtoMessage() {} +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6} +} + +func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) +} +func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(m, src) +} +func (m *GetProviderSchema) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema.Size(m) +} +func (m *GetProviderSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo + +type GetProviderSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } +func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Request) ProtoMessage() {} +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 0} +} + +func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) +} +func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) +} +func (m *GetProviderSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Request.Size(m) +} +func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo + +type GetProviderSchema_Response struct { + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" 
json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } +func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Response) ProtoMessage() {} +func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 1} +} + +func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) +} +func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) +} +func (m *GetProviderSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Response.Size(m) +} +func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo + +func (m *GetProviderSchema_Response) GetProvider() *Schema { + if m != nil { + return m.Provider + } + return nil +} + +func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if m != nil { + return m.ResourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if m != nil { + return m.DataSourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type PrepareProviderConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } +func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig) ProtoMessage() {} +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7} +} + +func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) +} +func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(m, src) +} +func (m *PrepareProviderConfig) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig.Size(m) +} +func (m *PrepareProviderConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo + +type PrepareProviderConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Request) Reset() { *m = 
PrepareProviderConfig_Request{} } +func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Request) ProtoMessage() {} +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 0} +} + +func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) +} +func (m *PrepareProviderConfig_Request) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) +} +func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } +func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Response) ProtoMessage() {} +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 1} +} + +func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src) +} +func (m *PrepareProviderConfig_Response) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) +} +func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if m != nil { + return m.PreparedConfig + } + return nil +} + +func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type UpgradeResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } +func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState) ProtoMessage() {} +func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + 
return fileDescriptor_17ae6090ff270234, []int{8} +} + +func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) +} +func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState.Merge(m, src) +} +func (m *UpgradeResourceState) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState.Size(m) +} +func (m *UpgradeResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo + +type UpgradeResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} } +func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Request) ProtoMessage() {} +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 0} +} + +func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) +} +func (m *UpgradeResourceState_Request) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Request.Size(m) +} +func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo + +func (m *UpgradeResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *UpgradeResourceState_Request) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *UpgradeResourceState_Request) GetRawState() *RawState { + if m != nil { + return m.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } +func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Response) ProtoMessage() {} +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 1} +} + +func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) +} +func (m *UpgradeResourceState_Response) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Response.Size(m) +} +func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo + +func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if m != nil { + return m.UpgradedState + } + return nil +} + +func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } +func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig) ProtoMessage() {} +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9} +} + +func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) +} +func (m *ValidateResourceTypeConfig) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) +} +func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo + +type ValidateResourceTypeConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } +func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 0} +} + +func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) +} +func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } +func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 1} +} + +func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) +} +func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig) Reset() { *m = 
ValidateDataSourceConfig{} } +func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig) ProtoMessage() {} +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10} +} + +func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) +} +func (m *ValidateDataSourceConfig) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig.Size(m) +} +func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo + +type ValidateDataSourceConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } +func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 0} +} + +func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) +} +func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } +func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 1} 
+} + +func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) +} +func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type Configure struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure) Reset() { *m = Configure{} } +func (m *Configure) String() string { return proto.CompactTextString(m) } +func (*Configure) ProtoMessage() {} +func (*Configure) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11} +} + +func (m *Configure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure.Unmarshal(m, b) +} +func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure.Marshal(b, m, deterministic) +} +func (m *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(m, src) +} +func (m *Configure) XXX_Size() int { + return xxx_messageInfo_Configure.Size(m) +} +func (m *Configure) XXX_DiscardUnknown() { + xxx_messageInfo_Configure.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure proto.InternalMessageInfo + +type Configure_Request struct { + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Request) Reset() { *m = Configure_Request{} } +func (m *Configure_Request) String() string { return proto.CompactTextString(m) } +func (*Configure_Request) ProtoMessage() {} +func (*Configure_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 0} +} + +func (m *Configure_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Request.Unmarshal(m, b) +} +func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) +} +func (m *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(m, src) +} +func (m *Configure_Request) XXX_Size() int { + return xxx_messageInfo_Configure_Request.Size(m) +} +func (m *Configure_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Request proto.InternalMessageInfo + +func (m *Configure_Request) GetTerraformVersion() string { + if m != nil { + return m.TerraformVersion + } + return "" +} + +func (m *Configure_Request) GetConfig() *DynamicValue { + 
if m != nil { + return m.Config + } + return nil +} + +type Configure_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Response) Reset() { *m = Configure_Response{} } +func (m *Configure_Response) String() string { return proto.CompactTextString(m) } +func (*Configure_Response) ProtoMessage() {} +func (*Configure_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 1} +} + +func (m *Configure_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Response.Unmarshal(m, b) +} +func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) +} +func (m *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(m, src) +} +func (m *Configure_Response) XXX_Size() int { + return xxx_messageInfo_Configure_Response.Size(m) +} +func (m *Configure_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Response proto.InternalMessageInfo + +func (m *Configure_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource) Reset() { *m = ReadResource{} } +func (m *ReadResource) String() string { return proto.CompactTextString(m) } +func (*ReadResource) ProtoMessage() {} +func (*ReadResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12} +} + +func (m *ReadResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource.Unmarshal(m, b) +} +func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) +} +func (m *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(m, src) +} +func (m *ReadResource) XXX_Size() int { + return xxx_messageInfo_ReadResource.Size(m) +} +func (m *ReadResource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource proto.InternalMessageInfo + +type ReadResource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } +func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Request) ProtoMessage() {} +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 0} +} + +func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) +} +func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) +} +func (m *ReadResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Request.Merge(m, src) +} +func (m *ReadResource_Request) XXX_Size() int { + return xxx_messageInfo_ReadResource_Request.Size(m) +} +func (m *ReadResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo + +func (m *ReadResource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadResource_Request) GetCurrentState() *DynamicValue { + if m != nil { + return m.CurrentState + } + return nil +} + +func (m *ReadResource_Request) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type ReadResource_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } +func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Response) ProtoMessage() {} +func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 1} +} + +func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) +} +func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) +} +func (m *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(m, src) +} +func (m *ReadResource_Response) XXX_Size() int { + return xxx_messageInfo_ReadResource_Response.Size(m) +} +func (m *ReadResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo + +func (m *ReadResource_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ReadResource_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type PlanResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } +func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange) ProtoMessage() {} +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13} +} + +func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) +} +func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PlanResourceChange.Merge(m, src) +} +func (m *PlanResourceChange) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange.Size(m) +} +func (m *PlanResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo + +type PlanResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } +func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Request) ProtoMessage() {} +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 0} +} + +func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) +} +func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) +} +func (m *PlanResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Request.Size(m) +} +func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo + +func (m *PlanResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if m != nil { + return m.ProposedNewState + } + return nil +} + +func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { + if m != nil { + return m.PriorPrivate + } + return nil +} + +type PlanResourceChange_Response struct { + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional 
+ // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} } +func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Response) ProtoMessage() {} +func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 1} +} + +func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) +} +func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) +} +func (m *PlanResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Response.Size(m) +} +func (m *PlanResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo + +func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if m != nil { + return m.RequiresReplace + } + return nil +} + +func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } +func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange) ProtoMessage() {} +func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14} +} + +func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) +} +func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange.Merge(m, src) +} +func (m *ApplyResourceChange) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange.Size(m) +} +func (m 
*ApplyResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo + +type ApplyResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} } +func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Request) ProtoMessage() {} +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 0} +} + +func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) +} +func (m *ApplyResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Request.Size(m) +} +func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo + +func (m *ApplyResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +type ApplyResourceChange_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } +func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Response) ProtoMessage() {} +func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 1} +} + +func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) +} +func (m *ApplyResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Response.Size(m) +} +func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo + +func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ApplyResourceChange_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ImportResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } +func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState) ProtoMessage() {} +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15} +} + +func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) +} +func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) +} +func (m *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(m, src) +} +func (m *ImportResourceState) XXX_Size() int { + return xxx_messageInfo_ImportResourceState.Size(m) +} +func (m *ImportResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo + +type ImportResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` +} + +func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } +func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Request) ProtoMessage() {} +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 0} +} + +func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) +} +func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(m, src) +} +func (m *ImportResourceState_Request) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Request.Size(m) +} +func (m *ImportResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo + +func (m *ImportResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_Request) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } +func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_ImportedResource) ProtoMessage() {} +func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 1} +} + +func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) +} +func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) +} +func (m *ImportResourceState_ImportedResource) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) +} +func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo + +func (m *ImportResourceState_ImportedResource) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type 
ImportResourceState_Response struct { + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } +func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Response) ProtoMessage() {} +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 2} +} + +func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) +} +func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(m, src) +} +func (m *ImportResourceState_Response) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Response.Size(m) +} +func (m *ImportResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo + +func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if m != nil { + return m.ImportedResources + } + return nil +} + +func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadDataSource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } +func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource) ProtoMessage() {} +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16} +} + +func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) +} +func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) +} +func (m *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(m, src) +} +func (m *ReadDataSource) XXX_Size() int { + return xxx_messageInfo_ReadDataSource.Size(m) +} +func (m *ReadDataSource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo + +type ReadDataSource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } +func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Request) 
ProtoMessage() {} +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 0} +} + +func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) +} +func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(m, src) +} +func (m *ReadDataSource_Request) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Request.Size(m) +} +func (m *ReadDataSource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo + +func (m *ReadDataSource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadDataSource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ReadDataSource_Response struct { + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } +func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Response) ProtoMessage() {} +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 1} +} + +func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) +} +func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(m, src) +} +func (m *ReadDataSource_Response) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Response.Size(m) +} +func (m *ReadDataSource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo + +func (m *ReadDataSource_Response) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type GetProvisionerSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } +func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema) ProtoMessage() {} +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17} +} + +func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) +} +func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, 
deterministic) +} +func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(m, src) +} +func (m *GetProvisionerSchema) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema.Size(m) +} +func (m *GetProvisionerSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo + +type GetProvisionerSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } +func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Request) ProtoMessage() {} +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 0} +} + +func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) +} +func (m *GetProvisionerSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) +} +func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo + +type GetProvisionerSchema_Response struct { + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } +func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Response) ProtoMessage() {} +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 1} +} + +func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) +} +func (m *GetProvisionerSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) +} +func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo + +func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if m != nil { + return m.Provisioner + } + return nil +} + +func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } 
+ return nil +} + +type ValidateProvisionerConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } +func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig) ProtoMessage() {} +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18} +} + +func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) +} +func (m *ValidateProvisionerConfig) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig.Size(m) +} +func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo + +type ValidateProvisionerConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } +func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18, 0} +} + +func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) +} +func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } +func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return 
fileDescriptor_17ae6090ff270234, []int{18, 1} +} + +func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) +} +func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ProvisionResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } +func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource) ProtoMessage() {} +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19} +} + +func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) +} +func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) +} +func (m *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(m, src) +} +func (m *ProvisionResource) XXX_Size() int { + return xxx_messageInfo_ProvisionResource.Size(m) +} +func (m *ProvisionResource) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo + +type ProvisionResource_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } +func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Request) ProtoMessage() {} +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 0} +} + +func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) +} +func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(m, src) +} +func (m *ProvisionResource_Request) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Request.Size(m) +} +func (m *ProvisionResource_Request) XXX_DiscardUnknown() { + 
xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo + +func (m *ProvisionResource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ProvisionResource_Request) GetConnection() *DynamicValue { + if m != nil { + return m.Connection + } + return nil +} + +type ProvisionResource_Response struct { + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } +func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Response) ProtoMessage() {} +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 1} +} + +func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) +} +func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(m, src) +} +func (m *ProvisionResource_Response) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Response.Size(m) +} +func (m *ProvisionResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo + +func (m *ProvisionResource_Response) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), 
"tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + 
proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") +} + +func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) } + +var fileDescriptor_17ae6090ff270234 = []byte{ + // 1880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49, + 0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79, + 0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66, + 0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75, + 0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0, + 0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5, + 0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf, + 0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21, + 0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2, + 0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97, + 0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b, + 0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65, + 0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38, + 0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0, + 0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9, + 0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23, + 0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1, + 0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec, + 0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43, + 0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8, + 0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1, + 0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0, + 0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0, + 0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab, + 0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7, + 0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38, + 0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31, + 0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3, + 0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90, + 0x46, 0x5a, 0xb7, 0x8c, 
0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e, + 0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52, + 0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4, + 0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8, + 0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6, + 0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80, + 0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35, + 0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9, + 0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23, + 0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34, + 0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87, + 0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a, + 0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e, + 0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba, + 0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3, + 0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5, + 0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f, + 0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48, + 0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2, + 0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda, + 0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa, + 0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e, + 0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1, + 0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42, + 0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25, + 0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea, + 0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27, + 0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09, + 0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2, + 0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c, + 0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb, + 0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76, + 0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e, + 0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0, + 0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d, + 0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64, + 0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 
0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde, + 0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2, + 0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4, + 0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79, + 0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39, + 0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3, + 0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7, + 0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9, + 0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60, + 0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54, + 0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b, + 0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39, + 0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00, + 0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1, + 0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2, + 0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa, + 0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71, + 0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00, + 0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd, + 0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7, + 0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe, + 0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87, + 0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96, + 0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c, + 0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5, + 0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1, + 0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e, + 0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c, + 0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6, + 0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb, + 0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9, + 0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96, + 0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42, + 0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf, + 0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d, + 0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5, + 0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 
0x8c, 0xa1, 0x11, 0x6d, + 0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0, + 0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8, + 0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2, + 0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8, + 0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80, + 0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a, + 0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86, + 0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27, + 0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86, + 0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9, + 0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30, + 0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5, + 0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d, + 0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc, + 0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ProviderClient interface { + //////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc *grpc.ClientConn +} + +func NewProviderClient(cc *grpc.ClientConn) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. 
+type ProviderServer interface { + //////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProviderServer can be embedded to have forward compatible implementations. +type UnimplementedProviderServer struct { +} + +func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") +} +func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not 
implemented") +} +func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc *grpc.ClientConn +} + +func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProvisionerServer can be embedded to have forward compatible implementations. 
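On the server side of the same service, an implementation can embed the UnimplementedProvisionerServer defined just below so that methods it does not provide fail gracefully, and each Send on the ProvisionResource stream surfaces as one Recv in the provisionerProvisionResourceClient loop shown above. A sketch follows; echoProvisioner and the stub import path are hypothetical:

package provisioner

import (
	// Hypothetical path to stubs generated from tfplugin5.proto; the SDK's own
	// tfplugin5 package is internal and not importable by other modules.
	tfplugin5 "example.com/yourplugin/internal/tfplugin5"
)

// echoProvisioner overrides only ProvisionResource; the embedded
// UnimplementedProvisionerServer supplies the remaining methods.
type echoProvisioner struct {
	tfplugin5.UnimplementedProvisionerServer
}

// Compile-time check that the type satisfies the service interface.
var _ tfplugin5.ProvisionerServer = (*echoProvisioner)(nil)

func (p *echoProvisioner) ProvisionResource(req *tfplugin5.ProvisionResource_Request, srv tfplugin5.Provisioner_ProvisionResourceServer) error {
	// Stream progress back to the caller line by line.
	for _, line := range []string{"starting provisioner", "provisioning complete"} {
		if err := srv.Send(&tfplugin5.ProvisionResource_Response{Output: line}); err != nil {
			return err
		}
	}
	return nil
}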
+type UnimplementedProvisionerServer struct { +} + +func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") +} +func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { + return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") +} +func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 000000000..9875d9ba6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1,353 @@ +// Terraform Plugin RPC protocol version 5.1 +// +// This file defines version 5.1 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will be updated in-place in the source Terraform repository for +// any minor versions of protocol 5, but later minor versions will always be +// backwards compatible. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// +syntax = "proto3"; + +package tfplugin5; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message Stop { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map flatmap = 2; +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. 
+message Schema {
+    message Block {
+        int64 version = 1;
+        repeated Attribute attributes = 2;
+        repeated NestedBlock block_types = 3;
+    }
+
+    message Attribute {
+        string name = 1;
+        bytes type = 2;
+        string description = 3;
+        bool required = 4;
+        bool optional = 5;
+        bool computed = 6;
+        bool sensitive = 7;
+    }
+
+    message NestedBlock {
+        enum NestingMode {
+            INVALID = 0;
+            SINGLE = 1;
+            LIST = 2;
+            SET = 3;
+            MAP = 4;
+            GROUP = 5;
+        }
+
+        string type_name = 1;
+        Block block = 2;
+        NestingMode nesting = 3;
+        int64 min_items = 4;
+        int64 max_items = 5;
+    }
+
+    // The version of the schema.
+    // Schemas are versioned, so that providers can upgrade a saved resource
+    // state when the schema is changed.
+    int64 version = 1;
+
+    // Block is the top level configuration block for this schema.
+    Block block = 2;
+}
+
+service Provider {
+    //////// Information about what a provider supports/expects
+    rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
+    rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
+    rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
+    rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
+    rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
+
+    //////// One-time initialization, called before other functions below
+    rpc Configure(Configure.Request) returns (Configure.Response);
+
+    //////// Managed Resource Lifecycle
+    rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
+    rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
+    rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
+    rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
+
+    rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
+
+    //////// Graceful Shutdown
+    rpc Stop(Stop.Request) returns (Stop.Response);
+}
+
+message GetProviderSchema {
+    message Request {
+    }
+    message Response {
+        Schema provider = 1;
+        map<string, Schema> resource_schemas = 2;
+        map<string, Schema> data_source_schemas = 3;
+        repeated Diagnostic diagnostics = 4;
+    }
+}
+
+message PrepareProviderConfig {
+    message Request {
+        DynamicValue config = 1;
+    }
+    message Response {
+        DynamicValue prepared_config = 1;
+        repeated Diagnostic diagnostics = 2;
+    }
+}
+
+message UpgradeResourceState {
+    message Request {
+        string type_name = 1;
+
+        // version is the schema_version number recorded in the state file
+        int64 version = 2;
+
+        // raw_state is the raw states as stored for the resource. Core does
+        // not have access to the schema of prior_version, so it's the
+        // provider's responsibility to interpret this value using the
+        // appropriate older schema. The raw_state will be the json encoded
+        // state, or a legacy flat-mapped format.
+        RawState raw_state = 3;
+    }
+    message Response {
+        // new_state is a msgpack-encoded data structure that, when interpreted with
+        // the _current_ schema for this resource type, is functionally equivalent to
+        // that which was given in prior_state_raw.
+        DynamicValue upgraded_state = 1;
+
+        // diagnostics describes any errors encountered during migration that could not
+        // be safely resolved, and warnings about any possibly-risky assumptions made
+        // in the upgrade process.
+ repeated Diagnostic diagnostics = 2; + } +} + +message ValidateResourceTypeConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ValidateDataSourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message Configure { + message Request { + string terraform_version = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ReadResource { + message Request { + string type_name = 1; + DynamicValue current_state = 2; + bytes private = 3; + } + message Response { + DynamicValue new_state = 1; + repeated Diagnostic diagnostics = 2; + bytes private = 3; + } +} + +message PlanResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue proposed_new_state = 3; + DynamicValue config = 4; + bytes prior_private = 5; + } + + message Response { + DynamicValue planned_state = 1; + repeated AttributePath requires_replace = 2; + bytes planned_private = 3; + repeated Diagnostic diagnostics = 4; + + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; + } +} + +message ApplyResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue planned_state = 3; + DynamicValue config = 4; + bytes planned_private = 5; + } + message Response { + DynamicValue new_state = 1; + bytes private = 2; + repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +service Provisioner { + rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); + rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); + rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProvisionerSchema { + message Request { + } + message Response { + Schema provisioner = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateProvisionerConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ProvisionResource { + message Request { + DynamicValue config = 1; + DynamicValue connection = 2; + } + message Response { + string output = 1; + repeated Diagnostic diagnostics = 2; + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go new file mode 100644 index 000000000..2d56dab69 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/version/version.go @@ -0,0 +1,36 @@ +// The version package provides a location to set the release versions for all +// packages to consume, without creating import cycles. +// +// This package should not import any other terraform packages. +package version + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var Version = "0.12.7" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var Prerelease = "sdk" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(Version)) +} + +// String returns the complete version string, including prerelease +func String() string { + if Prerelease != "" { + return fmt.Sprintf("%s-%s", Version, Prerelease) + } + return Version +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go new file mode 100644 index 000000000..e64e224b1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/meta/meta.go @@ -0,0 +1,36 @@ +// The meta package provides a location to set the release version +// and any other relevant metadata for the SDK. +// +// This package should not import any other SDK packages. 
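Because version.go above (and meta.go just below) validate their versions through go-version at init time, callers can apply ordinary constraint checks against them. A small sketch, with an illustrative constraint range:

package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/terraform-plugin-sdk/meta"
)

func main() {
	// meta.SemVer is built with version.Must at init time, so it is non-nil here.
	constraint, err := version.NewConstraint(">= 1.0.0, < 2.0.0") // illustrative range
	if err != nil {
		log.Fatal(err)
	}
	if constraint.Check(meta.SemVer) {
		fmt.Printf("SDK %s satisfies %s\n", meta.SDKVersionString(), constraint)
	}
}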
+package meta + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var SDKVersion = "1.6.0" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var SDKPrerelease = "" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(SDKVersion)) +} + +// VersionString returns the complete version string, including prerelease +func SDKVersionString() string { + if SDKPrerelease != "" { + return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) + } + return SDKVersion +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go new file mode 100644 index 000000000..5a99e9006 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/client.go @@ -0,0 +1,35 @@ +package plugin + +import ( + "os" + "os/exec" + + hclog "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" +) + +// ClientConfig returns a configuration object that can be used to instantiate +// a client for the plugin described by the given metadata. +func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { + logger := hclog.New(&hclog.LoggerOptions{ + Name: "plugin", + Level: hclog.Trace, + Output: os.Stderr, + }) + + return &plugin.ClientConfig{ + Cmd: exec.Command(m.Path), + HandshakeConfig: Handshake, + VersionedPlugins: VersionedPlugins, + Managed: true, + Logger: logger, + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + AutoMTLS: true, + } +} + +// Client returns a plugin client for the plugin described by the given metadata. +func Client(m discovery.PluginMeta) *plugin.Client { + return plugin.NewClient(ClientConfig(m)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go new file mode 100644 index 000000000..e4520975c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provider.go @@ -0,0 +1,563 @@ +package plugin + +import ( + "context" + "errors" + "log" + "sync" + + "github.com/zclconf/go-cty/cty" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package. 
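On the core side, the ClientConfig and Client helpers above are typically combined with go-plugin's dispense flow to obtain the GRPCProvider defined below. The sketch here is written as if it lived in this plugin package (so it can reach the internal discovery types); error handling is kept minimal, and assigning PluginClient mirrors the note in Close further down:

package plugin

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery"
)

// dispenseProvider launches the plugin process described by m and asks the
// go-plugin client for the "provider" implementation, which arrives as the
// *GRPCProvider constructed by GRPCProviderPlugin.GRPCClient below.
func dispenseProvider(m discovery.PluginMeta) (*GRPCProvider, error) {
	client := Client(m)

	rpcClient, err := client.Client()
	if err != nil {
		return nil, err
	}

	raw, err := rpcClient.Dispense("provider")
	if err != nil {
		return nil, err
	}

	p, ok := raw.(*GRPCProvider)
	if !ok {
		client.Kill()
		return nil, fmt.Errorf("unexpected plugin type %T", raw)
	}

	// Hand the provider a reference to the plugin.Client so Close can kill
	// the child process when Terraform is done with it.
	p.PluginClient = client
	return p, nil
}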
+type GRPCProviderPlugin struct { + plugin.Plugin + GRPCProvider func() proto.ProviderServer +} + +func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvider{ + client: proto.NewProviderClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProviderServer(s, p.GRPCProvider()) + return nil +} + +// GRPCProvider handles the client, or core side of the plugin rpc connection. +// The GRPCProvider methods are mostly a translation layer between the +// terraform provioders types and the grpc proto types, directly converting +// between the two. +type GRPCProvider struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + // TestServer contains a grpc.Server to close when the GRPCProvider is being + // used in an end to end test of a provider. + TestServer *grpc.Server + + // Proto client use to make the grpc service calls. + client proto.ProviderClient + + // this context is created by the plugin package, and is canceled when the + // plugin process ends. + ctx context.Context + + // schema stores the schema for this provider. This is used to properly + // serialize the state for requests. + mu sync.Mutex + schemas providers.GetSchemaResponse +} + +// getSchema is used internally to get the saved provider schema. The schema +// should have already been fetched from the provider, but we have to +// synchronize access to avoid being called concurrently with GetSchema. +func (p *GRPCProvider) getSchema() providers.GetSchemaResponse { + p.mu.Lock() + // unlock inline in case GetSchema needs to be called + if p.schemas.Provider.Block != nil { + p.mu.Unlock() + return p.schemas + } + p.mu.Unlock() + + // the schema should have been fetched already, but give it another shot + // just in case things are being called out of order. This may happen for + // tests. + schemas := p.GetSchema() + if schemas.Diagnostics.HasErrors() { + panic(schemas.Diagnostics.Err()) + } + + return schemas +} + +// getResourceSchema is a helper to extract the schema for a resource, and +// panics if the schema is not available. +func (p *GRPCProvider) getResourceSchema(name string) providers.Schema { + schema := p.getSchema() + resSchema, ok := schema.ResourceTypes[name] + if !ok { + panic("unknown resource type " + name) + } + return resSchema +} + +// gettDatasourceSchema is a helper to extract the schema for a datasource, and +// panics if that schema is not available. +func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema { + schema := p.getSchema() + dataSchema, ok := schema.DataSources[name] + if !ok { + panic("unknown data source " + name) + } + return dataSchema +} + +func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) { + log.Printf("[TRACE] GRPCProvider: GetSchema") + p.mu.Lock() + defer p.mu.Unlock() + + if p.schemas.Provider.Block != nil { + return p.schemas + } + + resp.ResourceTypes = make(map[string]providers.Schema) + resp.DataSources = make(map[string]providers.Schema) + + // Some providers may generate quite large schemas, and the internal default + // grpc response size limit is 4MB. 
64MB should cover most any use case, and + // if we get providers nearing that we may want to consider a finer-grained + // API to fetch individual resource schemas. + // Note: this option is marked as EXPERIMENTAL in the grpc API. + const maxRecvSize = 64 << 20 + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize}) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provider == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema")) + return resp + } + + resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider) + + for name, res := range protoResp.ResourceSchemas { + resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res) + } + + for name, data := range protoResp.DataSourceSchemas { + resp.DataSources[name] = convert.ProtoToProviderSchema(data) + } + + p.schemas = resp + + return resp +} + +func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) { + log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig") + + schema := p.getSchema() + ty := schema.Provider.Block.ImpliedType() + + mp, err := msgpack.Marshal(r.Config, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PrepareProviderConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + config := cty.NullVal(ty) + if protoResp.PreparedConfig != nil { + config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PreparedConfig = config + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") + resourceSchema := p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := 
p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") + + resSchema := p.getResourceSchema(r.TypeName) + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.UpgradedState != nil { + state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + + resp.UpgradedState = state + return resp +} + +func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { + log.Printf("[TRACE] GRPCProvider: Configure") + + schema := p.getSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: r.TerraformVersion, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + log.Printf("[TRACE] GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil { + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadResource") + + resSchema := p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp 
providers.PlanResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: PlanResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.PlannedState != nil { + state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + 
log.Printf("[TRACE] GRPCProvider: ImportResourceState") + + protoReq := &proto.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema := p.getResourceSchema(resource.TypeName) + state := cty.NullVal(resSchema.Block.ImpliedType()) + if imported.State != nil { + state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadDataSource") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(dataSchema.Block.ImpliedType()) + if protoResp.State != nil { + state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. +func (p *GRPCProvider) Close() error { + log.Printf("[TRACE] GRPCProvider: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. 
+ if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go new file mode 100644 index 000000000..c0e6f549a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/grpc_provisioner.go @@ -0,0 +1,178 @@ +package plugin + +import ( + "context" + "errors" + "io" + "log" + "sync" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/convert" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. +type GRPCProvisionerPlugin struct { + plugin.Plugin + GRPCProvisioner func() proto.ProvisionerServer +} + +func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvisioner{ + client: proto.NewProvisionerClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) + return nil +} + +// provisioners.Interface grpc implementation +type GRPCProvisioner struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvider a way to shutdown the plugin process. + PluginClient *plugin.Client + + client proto.ProvisionerClient + ctx context.Context + + // Cache the schema since we need it for serialization in each method call. 
+ mu sync.Mutex + schema *configschema.Block +} + +func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.schema != nil { + return provisioners.GetSchemaResponse{ + Provisioner: p.schema, + } + } + + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provisioner == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) + return resp + } + + resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) + + p.schema = resp.Provisioner + + return resp +} + +func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateProvisionerConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // connection is always assumed to be a simple string map + connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ProvisionResource_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + Connection: &proto.DynamicValue{Msgpack: connMP}, + } + + outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + for { + rcv, err := outputClient.Recv() + if rcv != nil { + r.UIOutput.Output(rcv.Output) + } + if err != nil { + if err != io.EOF { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + break + } + + if len(rcv.Diagnostics) > 0 { + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) + break + } + } + + return resp +} + +func (p *GRPCProvisioner) Stop() error { + protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) + if err != nil { + return err + } + if protoResp.Error != "" { + return errors.New(protoResp.Error) + } + return nil +} + +func (p *GRPCProvisioner) Close() error { + // check this since it's not automatically inserted during plugin creation + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go new file mode 100644 index 000000000..e4fb57761 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/plugin.go @@ -0,0 +1,14 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" +) + +// See serve.go for serving plugins + +var VersionedPlugins = map[int]plugin.PluginSet{ + 5: { + "provider": &GRPCProviderPlugin{}, + "provisioner": &GRPCProvisionerPlugin{}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go new file mode 100644 index 000000000..bfd62e2e9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/resource_provider.go @@ -0,0 +1,620 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// ResourceProviderPlugin is the plugin.Plugin implementation. +type ResourceProviderPlugin struct { + ResourceProvider func() terraform.ResourceProvider +} + +func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProviderServer{ + Broker: b, + Provider: p.ResourceProvider(), + }, nil +} + +func (p *ResourceProviderPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvider{Broker: b, Client: c}, nil +} + +// ResourceProvider is an implementation of terraform.ResourceProvider +// that communicates over RPC. +type ResourceProvider struct { + Broker *plugin.MuxBroker + Client *rpc.Client +} + +func (p *ResourceProvider) Stop() error { + var resp ResourceProviderStopResponse + err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + var result ResourceProviderGetSchemaResponse + args := &ResourceProviderGetSchemaArgs{ + Req: req, + } + + err := p.Client.Call("Plugin.GetSchema", args, &result) + if err != nil { + return nil, err + } + + if result.Error != nil { + err = result.Error + } + + return result.Schema, err +} + +func (p *ResourceProvider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + id := p.Broker.NextId() + go p.Broker.AcceptAndServe(id, &UIInputServer{ + UIInput: input, + }) + + var resp ResourceProviderInputResponse + args := ResourceProviderInputArgs{ + InputId: id, + Config: c, + } + + err := p.Client.Call("Plugin.Input", &args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + return nil, err + } + + return resp.Config, nil +} + +func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResponse + args := ResourceProviderValidateArgs{ + Config: c, + } + + err := p.Client.Call("Plugin.Validate", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := 
p.Client.Call("Plugin.ValidateResource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { + var resp ResourceProviderConfigureResponse + err := p.Client.Call("Plugin.Configure", c, &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderApplyResponse + args := &ResourceProviderApplyArgs{ + Info: info, + State: s, + Diff: d, + } + + err := p.Client.Call("Plugin.Apply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderDiffResponse + args := &ResourceProviderDiffArgs{ + Info: info, + State: s, + Config: c, + } + err := p.Client.Call("Plugin.Diff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.Diff, err +} + +func (p *ResourceProvider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + var resp ResourceProviderRefreshResponse + args := &ResourceProviderRefreshArgs{ + Info: info, + State: s, + } + + err := p.Client.Call("Plugin.Refresh", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + var resp ResourceProviderImportStateResponse + args := &ResourceProviderImportStateArgs{ + Info: info, + Id: id, + } + + err := p.Client.Call("Plugin.ImportState", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Resources() []terraform.ResourceType { + var result []terraform.ResourceType + + err := p.Client.Call("Plugin.Resources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? 
+ return nil + } + + return result +} + +func (p *ResourceProvider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderReadDataDiffResponse + args := &ResourceProviderReadDataDiffArgs{ + Info: info, + Config: c, + } + + err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.Diff, err +} + +func (p *ResourceProvider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderReadDataApplyResponse + args := &ResourceProviderReadDataApplyArgs{ + Info: info, + Diff: d, + } + + err := p.Client.Call("Plugin.ReadDataApply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) DataSources() []terraform.DataSource { + var result []terraform.DataSource + + err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? + return nil + } + + return result +} + +func (p *ResourceProvider) Close() error { + return p.Client.Close() +} + +// ResourceProviderServer is a net/rpc compatible structure for serving +// a ResourceProvider. This should not be used directly. +type ResourceProviderServer struct { + Broker *plugin.MuxBroker + Provider terraform.ResourceProvider +} + +type ResourceProviderStopResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderGetSchemaArgs struct { + Req *terraform.ProviderSchemaRequest +} + +type ResourceProviderGetSchemaResponse struct { + Schema *terraform.ProviderSchema + Error *plugin.BasicError +} + +type ResourceProviderConfigureResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderInputArgs struct { + InputId uint32 + Config *terraform.ResourceConfig +} + +type ResourceProviderInputResponse struct { + Config *terraform.ResourceConfig + Error *plugin.BasicError +} + +type ResourceProviderApplyArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Diff *terraform.InstanceDiff +} + +type ResourceProviderApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderDiffArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Config *terraform.ResourceConfig +} + +type ResourceProviderDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderRefreshArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState +} + +type ResourceProviderRefreshResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderImportStateArgs struct { + Info *terraform.InstanceInfo + Id string +} + +type ResourceProviderImportStateResponse struct { + State []*terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataApplyArgs struct { + Info *terraform.InstanceInfo + Diff *terraform.InstanceDiff +} + +type ResourceProviderReadDataApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataDiffArgs struct { + Info *terraform.InstanceInfo + Config *terraform.ResourceConfig +} + +type ResourceProviderReadDataDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderValidateArgs struct { + Config 
*terraform.ResourceConfig +} + +type ResourceProviderValidateResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +type ResourceProviderValidateResourceArgs struct { + Config *terraform.ResourceConfig + Type string +} + +type ResourceProviderValidateResourceResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +func (s *ResourceProviderServer) Stop( + _ interface{}, + reply *ResourceProviderStopResponse) error { + err := s.Provider.Stop() + *reply = ResourceProviderStopResponse{ + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (s *ResourceProviderServer) GetSchema( + args *ResourceProviderGetSchemaArgs, + result *ResourceProviderGetSchemaResponse, +) error { + schema, err := s.Provider.GetSchema(args.Req) + result.Schema = schema + if err != nil { + result.Error = plugin.NewBasicError(err) + } + return nil +} + +func (s *ResourceProviderServer) Input( + args *ResourceProviderInputArgs, + reply *ResourceProviderInputResponse) error { + conn, err := s.Broker.Dial(args.InputId) + if err != nil { + *reply = ResourceProviderInputResponse{ + Error: plugin.NewBasicError(err), + } + return nil + } + client := rpc.NewClient(conn) + defer client.Close() + + input := &UIInput{Client: client} + + config, err := s.Provider.Input(input, args.Config) + *reply = ResourceProviderInputResponse{ + Config: config, + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (s *ResourceProviderServer) Validate( + args *ResourceProviderValidateArgs, + reply *ResourceProviderValidateResponse) error { + warns, errs := s.Provider.Validate(args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ValidateResource( + args *ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateResource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) Configure( + config *terraform.ResourceConfig, + reply *ResourceProviderConfigureResponse) error { + err := s.Provider.Configure(config) + *reply = ResourceProviderConfigureResponse{ + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Apply( + args *ResourceProviderApplyArgs, + result *ResourceProviderApplyResponse) error { + state, err := s.Provider.Apply(args.Info, args.State, args.Diff) + *result = ResourceProviderApplyResponse{ + State: state, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Diff( + args *ResourceProviderDiffArgs, + result *ResourceProviderDiffResponse) error { + diff, err := s.Provider.Diff(args.Info, args.State, args.Config) + *result = ResourceProviderDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Refresh( + args *ResourceProviderRefreshArgs, + result *ResourceProviderRefreshResponse) error { + newState, err := s.Provider.Refresh(args.Info, args.State) + *result = ResourceProviderRefreshResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ImportState( + args 
*ResourceProviderImportStateArgs, + result *ResourceProviderImportStateResponse) error { + states, err := s.Provider.ImportState(args.Info, args.Id) + *result = ResourceProviderImportStateResponse{ + State: states, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Resources( + nothing interface{}, + result *[]terraform.ResourceType) error { + *result = s.Provider.Resources() + return nil +} + +func (s *ResourceProviderServer) ValidateDataSource( + args *ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ReadDataDiff( + args *ResourceProviderReadDataDiffArgs, + result *ResourceProviderReadDataDiffResponse) error { + diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) + *result = ResourceProviderReadDataDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ReadDataApply( + args *ResourceProviderReadDataApplyArgs, + result *ResourceProviderReadDataApplyResponse) error { + newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) + *result = ResourceProviderReadDataApplyResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) DataSources( + nothing interface{}, + result *[]terraform.DataSource) error { + *result = s.Provider.DataSources() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go new file mode 100644 index 000000000..cbe9fc636 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/serve.go @@ -0,0 +1,100 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" + grpcplugin "github.com/hashicorp/terraform-plugin-sdk/internal/helper/plugin" + proto "github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify + // a particular version during their handshake. This is the version used when Terraform 0.10 + // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must + // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and + // 0.11. + DefaultProtocolVersion = 4 +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. This should be bumped whenever a change happens in + // one or the other that makes it so that they can't safely communicate. + // This could be adding a new interface value, it could be how + // helper/schema computes diffs, etc. + ProtocolVersion: DefaultProtocolVersion, + + // The magic cookie values should NEVER be changed. 
+ MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() terraform.ResourceProvider +type GRPCProviderFunc func() proto.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + // since the plugins may not yet be aware of the new protocol, we + // automatically wrap the plugins in the grpc shims. + if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) + // this is almost always going to be a *schema.Provider, but check that + // we got back a valid provider just in case. + if provider != nil { + opts.GRPCProviderFunc = func() proto.ProviderServer { + return provider + } + } + } + + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, + }) +} + +// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring +// a plugin server or client. +func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { + return map[string]plugin.Plugin{ + "provider": &ResourceProviderPlugin{ + ResourceProvider: opts.ProviderFunc, + }, + } +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + // Set the legacy netrpc plugins at version 4. + // The oldest version is returned in when executed by a legacy go-plugin + // client. + plugins := map[int]plugin.PluginSet{ + 4: legacyPluginMap(opts), + } + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil { + plugins[5] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[5]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + } + return plugins +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go new file mode 100644 index 000000000..b24b03ebf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_input.go @@ -0,0 +1,52 @@ +package plugin + +import ( + "context" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// UIInput is an implementation of terraform.UIInput that communicates +// over RPC. +type UIInput struct { + Client *rpc.Client +} + +func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + var resp UIInputInputResponse + err := i.Client.Call("Plugin.Input", opts, &resp) + if err != nil { + return "", err + } + if resp.Error != nil { + err = resp.Error + return "", err + } + + return resp.Value, nil +} + +type UIInputInputResponse struct { + Value string + Error *plugin.BasicError +} + +// UIInputServer is a net/rpc compatible structure for serving +// a UIInputServer. This should not be used directly. 
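A minimal sketch of how a provider binary would use the Serve entrypoint and ServeOpts shown in serve.go above. The empty schema.Provider is only a placeholder (a real provider populates Schema and ResourcesMap), and the helper/schema import path is assumed from the rest of this SDK rather than from this hunk.

    package main

    import (
        "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
        "github.com/hashicorp/terraform-plugin-sdk/plugin"
        "github.com/hashicorp/terraform-plugin-sdk/terraform"
    )

    func main() {
        // Serve never returns; it wraps the legacy ResourceProvider in the
        // gRPC shim and registers it under protocol versions 4 and 5.
        plugin.Serve(&plugin.ServeOpts{
            ProviderFunc: func() terraform.ResourceProvider {
                return &schema.Provider{} // placeholder provider
            },
        })
    }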
+type UIInputServer struct { + UIInput terraform.UIInput +} + +func (s *UIInputServer) Input( + opts *terraform.InputOpts, + reply *UIInputInputResponse) error { + value, err := s.UIInput.Input(context.Background(), opts) + *reply = UIInputInputResponse{ + Value: value, + Error: plugin.NewBasicError(err), + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go new file mode 100644 index 000000000..07c13d03a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/plugin/ui_output.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/terraform-plugin-sdk/terraform" +) + +// UIOutput is an implementatin of terraform.UIOutput that communicates +// over RPC. +type UIOutput struct { + Client *rpc.Client +} + +func (o *UIOutput) Output(v string) { + o.Client.Call("Plugin.Output", v, new(interface{})) +} + +// UIOutputServer is the RPC server for serving UIOutput. +type UIOutputServer struct { + UIOutput terraform.UIOutput +} + +func (s *UIOutputServer) Output( + v string, + reply *interface{}) error { + s.UIOutput.Output(v) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go new file mode 100644 index 000000000..eb05c68ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context.go @@ -0,0 +1,882 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/states/statefile" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// InputMode defines what sort of input will be asked for when Input +// is called on Context. +type InputMode byte + +const ( + // InputModeVar asks for all variables + InputModeVar InputMode = 1 << iota + + // InputModeVarUnset asks for variables which are not set yet. + // InputModeVar must be set for this to have an effect. + InputModeVarUnset + + // InputModeProvider asks for provider variables + InputModeProvider + + // InputModeStd is the standard operating mode and asks for both variables + // and providers. + InputModeStd = InputModeVar | InputModeProvider +) + +// ContextOpts are the user-configurable options to create a context with +// NewContext. +type ContextOpts struct { + Config *configs.Config + Changes *plans.Changes + State *states.State + Targets []addrs.Targetable + Variables InputValues + Meta *ContextMeta + Destroy bool + + Hooks []Hook + Parallelism int + ProviderResolver providers.Resolver + Provisioners map[string]ProvisionerFactory + + // If non-nil, will apply as additional constraints on the provider + // plugins that will be requested from the provider resolver. + ProviderSHA256s map[string][]byte + SkipProviderVerify bool + + UIInput UIInput +} + +// ContextMeta is metadata about the running context. 
This is information +// that this package or structure cannot determine on its own but exposes +// into Terraform in various ways. This must be provided by the Context +// initializer. +type ContextMeta struct { + Env string // Env is the state environment +} + +// Context represents all the context that Terraform needs in order to +// perform operations on infrastructure. This structure is built using +// NewContext. +type Context struct { + config *configs.Config + changes *plans.Changes + state *states.State + targets []addrs.Targetable + variables InputValues + meta *ContextMeta + destroy bool + + hooks []Hook + components contextComponentFactory + schemas *Schemas + sh *stopHook + uiInput UIInput + + l sync.Mutex // Lock acquired during any task + parallelSem Semaphore + providerInputConfig map[string]map[string]cty.Value + providerSHA256s map[string][]byte + runCond *sync.Cond + runContext context.Context + runContextCancel context.CancelFunc + shadowErr error +} + +// (additional methods on Context can be found in context_*.go files.) + +// NewContext creates a new Context structure. +// +// Once a Context is created, the caller must not access or mutate any of +// the objects referenced (directly or indirectly) by the ContextOpts fields. +// +// If the returned diagnostics contains errors then the resulting context is +// invalid and must not be used. +func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform.NewContext: starting") + diags := CheckCoreVersionRequirements(opts.Config) + // If version constraints are not met then we'll bail early since otherwise + // we're likely to just see a bunch of other errors related to + // incompatibilities, which could be overwhelming for the user. + if diags.HasErrors() { + return nil, diags + } + + // Copy all the hooks and add our stop hook. We don't append directly + // to the Config so that we're not modifying that in-place. + sh := new(stopHook) + hooks := make([]Hook, len(opts.Hooks)+1) + copy(hooks, opts.Hooks) + hooks[len(opts.Hooks)] = sh + + state := opts.State + if state == nil { + state = states.NewState() + } + + // Determine parallelism, default to 10. We do this both to limit + // CPU pressure but also to have an extra guard against rate throttling + // from providers. + par := opts.Parallelism + if par == 0 { + par = 10 + } + + // Set up the variables in the following sequence: + // 0 - Take default values from the configuration + // 1 - Take values from TF_VAR_x environment variables + // 2 - Take values specified in -var flags, overriding values + // set by environment variables if necessary. This includes + // values taken from -var-file in addition. + var variables InputValues + if opts.Config != nil { + // Default variables from the configuration seed our map. + variables = DefaultVariableValues(opts.Config.Module.Variables) + } + // Variables provided by the caller (from CLI, environment, etc) can + // override the defaults. 
+ variables = variables.Override(opts.Variables) + + // Bind available provider plugins to the constraints in config + var providerFactories map[string]providers.Factory + if opts.ProviderResolver != nil { + deps := ConfigTreeDependencies(opts.Config, state) + reqd := deps.AllPluginRequirements() + if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { + reqd.LockExecutables(opts.ProviderSHA256s) + } + log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") + + var providerDiags tfdiags.Diagnostics + providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) + diags = diags.Append(providerDiags) + + if diags.HasErrors() { + return nil, diags + } + } else { + providerFactories = make(map[string]providers.Factory) + } + + components := &basicComponentFactory{ + providers: providerFactories, + provisioners: opts.Provisioners, + } + + log.Printf("[TRACE] terraform.NewContext: loading provider schemas") + schemas, err := LoadSchemas(opts.Config, opts.State, components) + if err != nil { + diags = diags.Append(err) + return nil, diags + } + + changes := opts.Changes + if changes == nil { + changes = plans.NewChanges() + } + + config := opts.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + log.Printf("[TRACE] terraform.NewContext: complete") + + return &Context{ + components: components, + schemas: schemas, + destroy: opts.Destroy, + changes: changes, + hooks: hooks, + meta: opts.Meta, + config: config, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, + + parallelSem: NewSemaphore(par), + providerInputConfig: make(map[string]map[string]cty.Value), + providerSHA256s: opts.ProviderSHA256s, + sh: sh, + }, nil +} + +func (c *Context) Schemas() *Schemas { + return c.schemas +} + +type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). + Validate bool + + // Legacy graphs only: won't prune the graph + Verbose bool +} + +// Graph returns the graph used for the given operation type. +// +// The most extensive or complex graph type is GraphTypePlan. 
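As a usage sketch for the NewContext constructor above (a hypothetical in-package helper): cfg, st and resolver are assumed to come from the configuration loader, a state file, and plugin discovery, and are not defined in this diff.

    func newContextSketch(cfg *configs.Config, st *states.State, resolver providers.Resolver) (*Context, tfdiags.Diagnostics) {
        ctx, diags := NewContext(&ContextOpts{
            Config:           cfg,
            State:            st,
            ProviderResolver: resolver,
            Parallelism:      10, // zero would also default to 10, per NewContext above
        })
        if diags.HasErrors() {
            // The context is invalid when errors are present and must not be used.
            return nil, diags
        }
        return ctx, diags
    }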
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { + if opts == nil { + opts = &ContextGraphOpts{Validate: true} + } + + log.Printf("[INFO] terraform: building graph: %s", typ) + switch typ { + case GraphTypeApply: + return (&ApplyGraphBuilder{ + Config: c.config, + Changes: c.changes, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + + case GraphTypeValidate: + // The validate graph is just a slightly modified plan graph + fallthrough + case GraphTypePlan: + // Create the plan graph builder + p := &PlanGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + } + + // Some special cases for other graph types shared with plan currently + var b GraphBuilder = p + switch typ { + case GraphTypeValidate: + b = ValidateGraphBuilder(p) + } + + return b.Build(addrs.RootModuleInstance) + + case GraphTypePlanDestroy: + return (&DestroyPlanGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + + case GraphTypeRefresh: + return (&RefreshGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + + case GraphTypeEval: + return (&EvalGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + }).Build(addrs.RootModuleInstance) + + default: + // Should never happen, because the above is exhaustive for all graph types. + panic(fmt.Errorf("unsupported graph type %s", typ)) + } +} + +// ShadowError returns any errors caught during a shadow operation. +// +// A shadow operation is an operation run in parallel to a real operation +// that performs the same tasks using new logic on copied state. The results +// are compared to ensure that the new logic works the same as the old logic. +// The shadow never affects the real operation or return values. +// +// The result of the shadow operation are only available through this function +// call after a real operation is complete. +// +// For API consumers of Context, you can safely ignore this function +// completely if you have no interest in helping report experimental feature +// errors to Terraform maintainers. Otherwise, please call this function +// after every operation and report this to the user. +// +// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect +// the real state or result of a real operation. They are purely informational +// to assist in future Terraform versions being more stable. Please message +// this effectively to the end user. +// +// This must be called only when no other operation is running (refresh, +// plan, etc.). The result can be used in parallel to any other operation +// running. +func (c *Context) ShadowError() error { + return c.shadowErr +} + +// State returns a copy of the current state associated with this context. +// +// This cannot safely be called in parallel with any other Context function. +func (c *Context) State() *states.State { + return c.state.DeepCopy() +} + +// Eval produces a scope in which expressions can be evaluated for +// the given module path. 
+// +// This method must first evaluate any ephemeral values (input variables, local +// values, and output values) in the configuration. These ephemeral values are +// not included in the persisted state, so they must be re-computed using other +// values in the state before they can be properly evaluated. The updated +// values are retained in the main state associated with the receiving context. +// +// This function takes no action against remote APIs but it does need access +// to all provider and provisioner instances in order to obtain their schemas +// for type checking. +// +// The result is an evaluation scope that can be used to resolve references +// against the root module. If the returned diagnostics contains errors then +// the returned scope may be nil. If it is not nil then it may still be used +// to attempt expression evaluation or other analysis, but some expressions +// may not behave as expected. +func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { + // This is intended for external callers such as the "terraform console" + // command. Internally, we create an evaluator in c.walk before walking + // the graph, and create scopes in ContextGraphWalker. + + var diags tfdiags.Diagnostics + defer c.acquireRun("eval")() + + // Start with a copy of state so that we don't affect any instances + // that other methods may have already returned. + c.state = c.state.DeepCopy() + var walker *ContextGraphWalker + + graph, graphDiags := c.Graph(GraphTypeEval, nil) + diags = diags.Append(graphDiags) + if !diags.HasErrors() { + var walkDiags tfdiags.Diagnostics + walker, walkDiags = c.walk(graph, walkEval) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + } + + if walker == nil { + // If we skipped walking the graph (due to errors) then we'll just + // use a placeholder graph walker here, which'll refer to the + // unmodified state. + walker = c.graphWalker(walkEval) + } + + // This is a bit weird since we don't normally evaluate outside of + // the context of a walk, but we'll "re-enter" our desired path here + // just to get hold of an EvalContext for it. GraphContextBuiltin + // caches its contexts, so we should get hold of the context that was + // previously used for evaluation here, unless we skipped walking. + evalCtx := walker.EnterPath(path) + return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags +} + +// Apply applies the changes represented by this context and returns +// the resulting state. +// +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. +func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { + defer c.acquireRun("apply")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. 
+ graph, diags := c.Graph(GraphTypeApply, nil) + if diags.HasErrors() { + return nil, diags + } + + // Determine the operation + operation := walkApply + if c.destroy { + operation = walkDestroy + } + + // Walk the graph + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + + if c.destroy && !diags.HasErrors() { + // If we know we were trying to destroy objects anyway, and we + // completed without any errors, then we'll also prune out any + // leftover empty resource husks (left after all of the instances + // of a resource with "count" or "for_each" are destroyed) to + // help ensure we end up with an _actually_ empty state, assuming + // we weren't destroying with -target here. + // + // (This doesn't actually take into account -target, but that should + // be okay because it doesn't throw away anything we can't recompute + // on a subsequent "terraform plan" run, if the resources are still + // present in the configuration. However, this _will_ cause "count = 0" + // resources to read as unknown during the next refresh walk, which + // may cause some additional churn if used in a data resource or + // provider block, until we remove refreshing as a separate walk and + // just do it as part of the plan walk.) + c.state.PruneResourceHusks() + } + + if len(c.targets) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Applied changes may be incomplete", + `The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending: + terraform plan + +Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, + )) + } + + return c.state, diags +} + +// Plan generates an execution plan for the given context. +// +// The execution plan encapsulates the context and can be stored +// in order to reinstantiate a context later for Apply. +// +// Plan also updates the diff of this context to be the diff generated +// by the plan, so Apply can be called after. +func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { + defer c.acquireRun("plan")() + c.changes = plans.NewChanges() + + var diags tfdiags.Diagnostics + + if len(c.targets) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Resource targeting is in effect", + `You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration. + +The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.`, + )) + } + + varVals := make(map[string]plans.DynamicValue, len(c.variables)) + for k, iv := range c.variables { + // We use cty.DynamicPseudoType here so that we'll save both the + // value _and_ its dynamic type in the plan, so we can recover + // exactly the same value later. 
+ dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to prepare variable value for plan", + fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), + )) + continue + } + varVals[k] = dv + } + + p := &plans.Plan{ + VariableValues: varVals, + TargetAddrs: c.targets, + ProviderSHA256s: c.providerSHA256s, + } + + var operation walkOperation + if c.destroy { + operation = walkPlanDestroy + } else { + // Set our state to be something temporary. We do this so that + // the plan can update a fake state so that variables work, then + // we replace it back with our old state. + old := c.state + if old == nil { + c.state = states.NewState() + } else { + c.state = old.DeepCopy() + } + defer func() { + c.state = old + }() + + operation = walkPlan + } + + // Build the graph. + graphType := GraphTypePlan + if c.destroy { + graphType = GraphTypePlanDestroy + } + graph, graphDiags := c.Graph(graphType, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return nil, diags + } + + // Do the walk + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + p.Changes = c.changes + + return p, diags +} + +// Refresh goes through all the resources in the state and refreshes them +// to their latest state. This will update the state that this context +// works with, along with returning it. +// +// Even in the case an error is returned, the state may be returned and +// will potentially be partially updated. +func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { + defer c.acquireRun("refresh")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Refresh builds a partial changeset as part of its work because it must + // create placeholder stubs for any resource instances that'll be created + // in subsequent plan so that provider configurations and data resources + // can interpolate from them. This plan is always thrown away after + // the operation completes, restoring any existing changeset. + oldChanges := c.changes + defer func() { c.changes = oldChanges }() + c.changes = plans.NewChanges() + + // Build the graph. + graph, diags := c.Graph(GraphTypeRefresh, nil) + if diags.HasErrors() { + return nil, diags + } + + // Do the walk + _, walkDiags := c.walk(graph, walkRefresh) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + + // During our walk we will have created planned object placeholders in + // state for resource instances that are in configuration but not yet + // created. These were created only to allow expression evaluation to + // work properly in provider and data blocks during the walk and must + // now be discarded, since a subsequent plan walk is responsible for + // creating these "for real". + // TODO: Consolidate refresh and plan into a single walk, so that the + // refresh walk doesn't need to emulate various aspects of the plan + // walk in order to properly evaluate provider and data blocks. + c.state.SyncWrapper().RemovePlannedResourceInstanceObjects() + + return c.state, diags +} + +// Stop stops the running task. +// +// Stop will block until the task completes. 
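Tying Plan and Apply together, a hypothetical in-package helper showing the usual calling order on a single Context; rendering or persisting the plan is elided.

    func planAndApplySketch(ctx *Context) (*states.State, tfdiags.Diagnostics) {
        if diags := ctx.Validate(); diags.HasErrors() {
            return nil, diags
        }
        plan, diags := ctx.Plan()
        if diags.HasErrors() {
            return nil, diags
        }
        _ = plan // a caller would normally render or persist the plan here
        // Apply can return a partially updated state together with errors; as
        // noted above, call ctx.State() if the state is needed after a failure.
        return ctx.Apply()
    }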
+func (c *Context) Stop() { + log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") + + c.l.Lock() + defer c.l.Unlock() + + // If we're running, then stop + if c.runContextCancel != nil { + log.Printf("[WARN] terraform: run context exists, stopping") + + // Tell the hook we want to stop + c.sh.Stop() + + // Stop the context + c.runContextCancel() + c.runContextCancel = nil + } + + // Grab the condition var before we exit + if cond := c.runCond; cond != nil { + log.Printf("[INFO] terraform: waiting for graceful stop to complete") + cond.Wait() + } + + log.Printf("[WARN] terraform: stop complete") +} + +// Validate performs semantic validation of the configuration, and returning +// any warnings or errors. +// +// Syntax and structural checks are performed by the configuration loader, +// and so are not repeated here. +func (c *Context) Validate() tfdiags.Diagnostics { + defer c.acquireRun("validate")() + + var diags tfdiags.Diagnostics + + // Validate input variables. We do this only for the values supplied + // by the root module, since child module calls are validated when we + // visit their graph nodes. + if c.config != nil { + varDiags := checkInputVariables(c.config.Module.Variables, c.variables) + diags = diags.Append(varDiags) + } + + // If we have errors at this point then we probably won't be able to + // construct a graph without producing redundant errors, so we'll halt early. + if diags.HasErrors() { + return diags + } + + // Build the graph so we can walk it and run Validate on nodes. + // We also validate the graph generated here, but this graph doesn't + // necessarily match the graph that Plan will generate, so we'll validate the + // graph again later after Planning. + graph, graphDiags := c.Graph(GraphTypeValidate, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return diags + } + + // Walk + walker, walkDiags := c.walk(graph, walkValidate) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return diags + } + + return diags +} + +// Config returns the configuration tree associated with this context. +func (c *Context) Config() *configs.Config { + return c.config +} + +// Variables will return the mapping of variables that were defined +// for this Context. If Input was called, this mapping may be different +// than what was given. +func (c *Context) Variables() InputValues { + return c.variables +} + +// SetVariable sets a variable after a context has already been built. +func (c *Context) SetVariable(k string, v cty.Value) { + c.variables[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, + } +} + +func (c *Context) acquireRun(phase string) func() { + // With the run lock held, grab the context lock to make changes + // to the run context. + c.l.Lock() + defer c.l.Unlock() + + // Wait until we're no longer running + for c.runCond != nil { + c.runCond.Wait() + } + + // Build our lock + c.runCond = sync.NewCond(&c.l) + + // Create a new run context + c.runContext, c.runContextCancel = context.WithCancel(context.Background()) + + // Reset the stop hook so we're not stopped + c.sh.Reset() + + // Reset the shadow errors + c.shadowErr = nil + + return c.releaseRun +} + +func (c *Context) releaseRun() { + // Grab the context lock so that we can make modifications to fields + c.l.Lock() + defer c.l.Unlock() + + // End our run. 
We check if runContext is non-nil because it can be + // set to nil if it was cancelled via Stop() + if c.runContextCancel != nil { + c.runContextCancel() + } + + // Unlock all waiting our condition + cond := c.runCond + c.runCond = nil + cond.Broadcast() + + // Unset the context + c.runContext = nil +} + +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { + log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) + + walker := c.graphWalker(operation) + + // Watch for a stop so we can call the provider Stop() API. + watchStop, watchWait := c.watchStop(walker) + + // Walk the real graph, this will block until it completes + diags := graph.Walk(walker) + + // Close the channel so the watcher stops, and wait for it to return. + close(watchStop) + <-watchWait + + return walker, diags +} + +func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { + return &ContextGraphWalker{ + Context: c, + State: c.state.SyncWrapper(), + Changes: c.changes.SyncWrapper(), + Operation: operation, + StopContext: c.runContext, + RootVariableValues: c.variables, + } +} + +// watchStop immediately returns a `stop` and a `wait` chan after dispatching +// the watchStop goroutine. This will watch the runContext for cancellation and +// stop the providers accordingly. When the watch is no longer needed, the +// `stop` chan should be closed before waiting on the `wait` chan. +// The `wait` chan is important, because without synchronizing with the end of +// the watchStop goroutine, the runContext may also be closed during the select +// incorrectly causing providers to be stopped. Even if the graph walk is done +// at that point, stopping a provider permanently cancels its StopContext which +// can cause later actions to fail. +func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { + stop := make(chan struct{}) + wait := make(chan struct{}) + + // get the runContext cancellation channel now, because releaseRun will + // write to the runContext field. + done := c.runContext.Done() + + go func() { + defer close(wait) + // Wait for a stop or completion + select { + case <-done: + // done means the context was canceled, so we need to try and stop + // providers. + case <-stop: + // our own stop channel was closed. + return + } + + // If we're here, we're stopped, trigger the call. + log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") + + { + // Copy the providers so that a misbehaved blocking Stop doesn't + // completely hang Terraform. + walker.providerLock.Lock() + ps := make([]providers.Interface, 0, len(walker.providerCache)) + for _, p := range walker.providerCache { + ps = append(ps, p) + } + defer walker.providerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + + { + // Call stop on all the provisioners + walker.provisionerLock.Lock() + ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) + for _, p := range walker.provisionerCache { + ps = append(ps, p) + } + defer walker.provisionerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. 
+ p.Stop() + } + } + }() + + return stop, wait +} + +// ShimLegacyState is a helper that takes the legacy state type and +// converts it to the new state type. +// +// This is implemented as a state file upgrade, so it will not preserve +// parts of the state structure that are not included in a serialized state, +// such as the resolved results of any local values, outputs in non-root +// modules, etc. +func ShimLegacyState(legacy *State) (*states.State, error) { + if legacy == nil { + return nil, nil + } + var buf bytes.Buffer + err := WriteState(legacy, &buf) + if err != nil { + return nil, err + } + f, err := statefile.Read(&buf) + if err != nil { + return nil, err + } + return f.State, err +} + +// MustShimLegacyState is a wrapper around ShimLegacyState that panics if +// the conversion does not succeed. This is primarily intended for tests where +// the given legacy state is an object constructed within the test. +func MustShimLegacyState(legacy *State) *states.State { + ret, err := ShimLegacyState(legacy) + if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go new file mode 100644 index 000000000..a627996e3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_components.go @@ -0,0 +1,68 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +// contextComponentFactory is the interface that Context uses +// to initialize various components such as providers and provisioners. +// This factory gets more information than the raw maps using to initialize +// a Context. This information is used for debugging. +type contextComponentFactory interface { + // ResourceProvider creates a new ResourceProvider with the given + // type. The "uid" is a unique identifier for this provider being + // initialized that can be used for internal tracking. + ResourceProvider(typ, uid string) (providers.Interface, error) + ResourceProviders() []string + + // ResourceProvisioner creates a new ResourceProvisioner with the + // given type. The "uid" is a unique identifier for this provisioner + // being initialized that can be used for internal tracking. + ResourceProvisioner(typ, uid string) (provisioners.Interface, error) + ResourceProvisioners() []string +} + +// basicComponentFactory just calls a factory from a map directly. 
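The shim helpers above are mainly for tests and older callers that still build the legacy State type. A small sketch, assuming legacy is such a hand-constructed value:

    func shimSketch(legacy *State) (*states.State, error) {
        newState, err := ShimLegacyState(legacy)
        if err != nil {
            return nil, err
        }
        return newState, nil
    }

    // In tests where a failed conversion should abort immediately, the
    // panicking wrapper can be used instead:
    //
    //	s := MustShimLegacyState(legacy)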
+type basicComponentFactory struct { + providers map[string]providers.Factory + provisioners map[string]ProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + result := make([]string, len(c.providers)) + for k := range c.providers { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + result := make([]string, len(c.provisioners)) + for k := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go new file mode 100644 index 000000000..4448d8706 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_graph_type.go @@ -0,0 +1,32 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=GraphType context_graph_type.go + +// GraphType is an enum of the type of graph to create with a Context. +// The values of the constants may change so they shouldn't be depended on; +// always use the constant name. +type GraphType byte + +const ( + GraphTypeInvalid GraphType = 0 + GraphTypeLegacy GraphType = iota + GraphTypeRefresh + GraphTypePlan + GraphTypePlanDestroy + GraphTypeApply + GraphTypeValidate + GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. +) + +// GraphTypeMap is a mapping of human-readable string to GraphType. This +// is useful to use as the mechanism for human input for configurable +// graph types. +var GraphTypeMap = map[string]GraphType{ + "apply": GraphTypeApply, + "plan": GraphTypePlan, + "plan-destroy": GraphTypePlanDestroy, + "refresh": GraphTypeRefresh, + "legacy": GraphTypeLegacy, + "validate": GraphTypeValidate, + "eval": GraphTypeEval, +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go new file mode 100644 index 000000000..9a9cd9626 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_import.go @@ -0,0 +1,83 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportOpts are used as the configuration for Import. +type ImportOpts struct { + // Targets are the targets to import + Targets []*ImportTarget + + // Config is optional, and specifies a config tree that will be loaded + // into the graph and evaluated. This is the source for provider + // configurations. + Config *configs.Config +} + +// ImportTarget is a single resource to import. +type ImportTarget struct { + // Addr is the address for the resource instance that the new object should + // be imported into. + Addr addrs.AbsResourceInstance + + // ID is the ID of the resource to import. 
This is resource-specific. + ID string + + // ProviderAddr is the address of the provider that should handle the import. + ProviderAddr addrs.AbsProviderConfig +} + +// Import takes already-created external resources and brings them +// under Terraform management. Import requires the exact type, name, and ID +// of the resources to import. +// +// This operation is idempotent. If the requested resource is already +// imported, no changes are made to the state. +// +// Further, this operation also gracefully handles partial state. If during +// an import there is a failure, all previously imported resources remain +// imported. +func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Hold a lock since we can modify our own state here + defer c.acquireRun("import")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // If no module is given, default to the module configured with + // the Context. + config := opts.Config + if config == nil { + config = c.config + } + + // Initialize our graph builder + builder := &ImportGraphBuilder{ + ImportTargets: opts.Targets, + Config: config, + Components: c.components, + Schemas: c.schemas, + } + + // Build the graph! + graph, graphDiags := builder.Build(addrs.RootModuleInstance) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return c.state, diags + } + + // Walk it + _, walkDiags := c.walk(graph, walkImport) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return c.state, diags + } + + return c.state, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go new file mode 100644 index 000000000..b99f1afac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/context_input.go @@ -0,0 +1,251 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Input asks for input to fill variables and provider configurations. +// This modifies the configuration in-place, so asking for Input twice +// may result in different UI output showing different current values. +func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + defer c.acquireRun("input")() + + if c.uiInput == nil { + log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") + return diags + } + + ctx := context.Background() + + if mode&InputModeVar != 0 { + log.Printf("[TRACE] Context.Input: Prompting for variables") + + // Walk the variables first for the root module. We walk them in + // alphabetical order for UX reasons. + configs := c.config.Module.Variables + names := make([]string, 0, len(configs)) + for name := range configs { + names = append(names, name) + } + sort.Strings(names) + Variables: + for _, n := range names { + v := configs[n] + + // If we only care about unset variables, then we should set any + // variable that is already set. 
+ if mode&InputModeVarUnset != 0 { + if _, isSet := c.variables[n]; isSet { + continue + } + } + + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Context.uiInput is nil during input walk") + continue + } + + // Ask the user for a value for this variable + var rawValue string + retry := 0 + for { + var err error + rawValue, err = c.uiInput.Input(ctx, &InputOpts{ + Id: fmt.Sprintf("var.%s", n), + Query: fmt.Sprintf("var.%s", n), + Description: v.Description, + }) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to request interactive input", + fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err), + )) + return diags + } + + if rawValue == "" && v.Default == cty.NilVal { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Required variable not assigned", + fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n), + )) + continue Variables + } + retry++ + continue + } + + break + } + + val, valDiags := v.ParsingMode.Parse(n, rawValue) + diags = diags.Append(valDiags) + if diags.HasErrors() { + continue + } + + c.variables[n] = &InputValue{ + Value: val, + SourceType: ValueFromInput, + } + } + } + + if mode&InputModeProvider != 0 { + log.Printf("[TRACE] Context.Input: Prompting for provider arguments") + + // We prompt for input only for provider configurations defined in + // the root module. At the time of writing that is an arbitrary + // restriction, but we have future plans to support "count" and + // "for_each" on modules that will then prevent us from supporting + // input for child module configurations anyway (since we'd need to + // dynamic-expand first), and provider configurations in child modules + // are not recommended since v0.11 anyway, so this restriction allows + // us to keep this relatively simple without significant hardship. + + pcs := make(map[string]*configs.Provider) + pas := make(map[string]addrs.ProviderConfig) + for _, pc := range c.config.Module.ProviderConfigs { + addr := pc.Addr() + pcs[addr.String()] = pc + pas[addr.String()] = addr + log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange) + } + // We also need to detect _implied_ provider configs from resources. + // These won't have *configs.Provider objects, but they will still + // exist in the map and we'll just treat them as empty below. 
+ for _, rc := range c.config.Module.ManagedResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange) + } + } + for _, rc := range c.config.Module.DataResources { + pa := rc.ProviderConfigAddr() + if pa.Alias != "" { + continue // alias configurations cannot be implied + } + if _, exists := pcs[pa.String()]; !exists { + pcs[pa.String()] = nil + pas[pa.String()] = pa + log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange) + } + } + + for pk, pa := range pas { + pc := pcs[pk] // will be nil if this is an implied config + + // Wrap the input into a namespace + input := &PrefixUIInput{ + IdPrefix: pk, + QueryPrefix: pk + ".", + UIInput: c.uiInput, + } + + schema := c.schemas.ProviderConfig(pa.Type) + if schema == nil { + // Could either be an incorrect config or just an incomplete + // mock in tests. We'll let a later pass decide, and just + // ignore this for the purposes of gathering input. + log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type) + continue + } + + // For our purposes here we just want to detect if attrbutes are + // set in config at all, so rather than doing a full decode + // (which would require us to prepare an evalcontext, etc) we'll + // use the low-level HCL API to process only the top-level + // structure. + var attrExprs hcl.Attributes // nil if there is no config + if pc != nil && pc.Config != nil { + lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec())) + content, _, diags := pc.Config.PartialContent(lowLevelSchema) + if diags.HasErrors() { + log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error()) + continue + } + attrExprs = content.Attributes + } + + keys := make([]string, 0, len(schema.Attributes)) + for key := range schema.Attributes { + keys = append(keys, key) + } + sort.Strings(keys) + + vals := map[string]cty.Value{} + for _, key := range keys { + attrS := schema.Attributes[key] + if attrS.Optional { + continue + } + if attrExprs != nil { + if _, exists := attrExprs[key]; exists { + continue + } + } + if !attrS.Type.Equals(cty.String) { + continue + } + + log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key) + rawVal, err := input.Input(ctx, &InputOpts{ + Id: key, + Query: key, + Description: attrS.Description, + }) + if err != nil { + log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err) + continue + } + + vals[key] = cty.StringVal(rawVal) + } + + c.providerInputConfig[pk] = vals + log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals) + } + } + + return diags +} + +// schemaForInputSniffing returns a transformed version of a given schema +// that marks all attributes as optional, which the Context.Input method can +// use to detect whether a required argument is set without missing arguments +// themselves generating errors. 
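A short sketch (hypothetical in-package helper) of how the InputMode flags from context.go combine with this Input method; the Context is assumed to have been built with a non-nil UIInput, since Input skips prompting otherwise.

    func inputSketch(ctx *Context) tfdiags.Diagnostics {
        // Prompt only for root-module variables that are still unset, plus any
        // required provider arguments that are missing from configuration.
        mode := InputModeVar | InputModeVarUnset | InputModeProvider
        return ctx.Input(mode)
    }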
+func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go new file mode 100644 index 000000000..e2f54883b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/diff.go @@ -0,0 +1,1441 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. 
+ panic("diff cannot represent modules with count or for_each keys") + } + + legacyPath[i] = step.Name + } + + m := &ModuleDiff{Path: legacyPath} + m.init() + d.Modules = append(d.Modules, m) + return m +} + +// ModuleByPath is used to lookup the module diff for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { + if d == nil { + return nil + } + for _, mod := range d.Modules { + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// RootModule returns the ModuleState for the root module +func (d *Diff) RootModule() *ModuleDiff { + root := d.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Empty returns true if the diff has no changes. +func (d *Diff) Empty() bool { + if d == nil { + return true + } + + for _, m := range d.Modules { + if !m.Empty() { + return false + } + } + + return true +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + addr := normalizeModulePath(m.Path) + key := addr.String() + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. +type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. 
+// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffCreate for a module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. +func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff. +func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. 
+ // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. 
+ if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. 
+ if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. + log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. 
+ if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." + } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." 
+ idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. + return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". 
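The helper defined next counts entries in the legacy "flatmap" encoding used throughout this file, where collections are flattened into string keys and a ".#" (list/set) or ".%" (map) key carries the element count. A minimal sketch with made-up attribute names; since the helper is unexported, this assumes code living in the same package (for example a test):

    attrs := map[string]string{
        "tags.%":         "2",
        "tags.env":       "prod",
        "tags.owner":     "ops",
        "ingress.#":      "1",
        "ingress.0.port": "443",
    }
    countFlatmapContainerValues("tags.%", attrs)    // "2": distinct suffixes "env" and "owner"
    countFlatmapContainerValues("ingress.#", attrs) // "1": only element index "0" is present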
+func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. 
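The exported InstanceDiff API above can be exercised directly from importing code. A rough sketch, assuming `import "github.com/hashicorp/terraform-plugin-sdk/terraform"` and invented attribute values:

    d := terraform.NewInstanceDiff()
    d.SetAttribute("ami", &terraform.ResourceAttrDiff{
        Old:         "ami-1234",
        New:         "ami-5678",
        RequiresNew: true,
    })
    d.Empty()       // false: one attribute is changing
    d.RequiresNew() // true: the "ami" change forces replacement
    d.ChangeType()  // DiffCreate: RequiresNew is set but no destroy flag is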
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within the terraform package. +// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. 
If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2, _ := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. + if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k, _ := range d.Attributes { + checkOld[k] = struct{}{} + } + for k, _ := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k, _ := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. 
So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2, _ := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. + if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2, _ := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2, _ := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). + } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr, _ := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} + +// moduleDiffSort implements sort.Interface to sort module diffs by path. 
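A small sketch of the tolerance Same describes above: an attribute that was only marked as computed in the planned diff may legitimately be absent from the apply-time diff. Import path as before; the attribute name is invented:

    plan := &terraform.InstanceDiff{
        Attributes: map[string]*terraform.ResourceAttrDiff{
            "arn": {NewComputed: true},
        },
    }
    apply := terraform.NewInstanceDiff()
    same, _ := plan.Same(apply)
    // same == true: the computed "arn" dropping out of the second diff is
    // treated as "value turned out to be unchanged", not as a mismatch.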
+type moduleDiffSort []*ModuleDiff + +func (s moduleDiffSort) Len() int { return len(s) } +func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s moduleDiffSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go new file mode 100644 index 000000000..17464bc06 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/edge_destroy.go @@ -0,0 +1,17 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// DestroyEdge is an edge that represents a standard "destroy" relationship: +// Target depends on Source because Source is destroying. +type DestroyEdge struct { + S, T dag.Vertex +} + +func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) } +func (e *DestroyEdge) Source() dag.Vertex { return e.S } +func (e *DestroyEdge) Target() dag.Vertex { return e.T } diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go new file mode 100644 index 000000000..c490c3bcf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval.go @@ -0,0 +1,70 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalNode is the interface that must be implemented by graph nodes to +// evaluate/execute. +type EvalNode interface { + // Eval evaluates this node with the given context. The second parameter + // are the argument values. These will match in order and 1-1 with the + // results of the Args() return value. + Eval(EvalContext) (interface{}, error) +} + +// GraphNodeEvalable is the interface that graph nodes must implement +// to enable valuation. +type GraphNodeEvalable interface { + EvalTree() EvalNode +} + +// EvalEarlyExitError is a special error return value that can be returned +// by eval nodes that does an early exit. +type EvalEarlyExitError struct{} + +func (EvalEarlyExitError) Error() string { return "early exit" } + +// Eval evaluates the given EvalNode with the given context, properly +// evaluating all args in the correct order. +func Eval(n EvalNode, ctx EvalContext) (interface{}, error) { + // Call the lower level eval which doesn't understand early exit, + // and if we early exit, it isn't an error. + result, err := EvalRaw(n, ctx) + if err != nil { + if _, ok := err.(EvalEarlyExitError); ok { + return nil, nil + } + } + + return result, err +} + +// EvalRaw is like Eval except that it returns all errors, even if they +// signal something normal such as EvalEarlyExitError. 
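To make the early-exit contract of Eval versus EvalRaw concrete, a minimal sketch (the node type is invented; import path as before):

    type earlyExitNode struct{}

    func (earlyExitNode) Eval(terraform.EvalContext) (interface{}, error) {
        return nil, terraform.EvalEarlyExitError{}
    }

    // Eval swallows the sentinel and reports success...
    v, err := terraform.Eval(earlyExitNode{}, nil) // v == nil, err == nil
    // ...while EvalRaw hands it back to the caller unchanged.
    _, rawErr := terraform.EvalRaw(earlyExitNode{}, nil) // rawErr is EvalEarlyExitError{}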
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { + path := "unknown" + if ctx != nil { + path = ctx.Path().String() + } + if path == "" { + path = "" + } + + log.Printf("[TRACE] %s: eval: %T", path, n) + output, err := n.Eval(ctx) + if err != nil { + switch err.(type) { + case EvalEarlyExitError: + log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err) + case tfdiags.NonFatalError: + log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err) + default: + log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) + } + } + + return output, err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go new file mode 100644 index 000000000..6beeaea98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_apply.go @@ -0,0 +1,656 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalApply is an EvalNode implementation that writes the diff to +// the full diff. +type EvalApply struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Dependencies []addrs.Referenceable + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + CreateNew *bool + Error *error +} + +// TODO: test +func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + change := *n.Change + provider := *n.Provider + state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) + + if state == nil { + state = &states.ResourceInstanceObject{} + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + if n.CreateNew != nil { + *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) + } + + configVal := cty.NullVal(cty.DynamicPseudoType) + if n.Config != nil { + var configDiags tfdiags.Diagnostics + forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + } + + if !configVal.IsWhollyKnown() { + return nil, fmt.Errorf( + "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", + absAddr, + ) + } + + log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) + resp := 
provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: change.Before, + Config: configVal, + PlannedState: change.After, + PlannedPrivate: change.Private, + }) + applyDiags := resp.Diagnostics + if n.Config != nil { + applyDiags = applyDiags.InConfigBody(n.Config.Config) + } + diags = diags.Append(applyDiags) + + // Even if there are errors in the returned diagnostics, the provider may + // have returned a _partial_ state for an object that already exists but + // failed to fully configure, and so the remaining code must always run + // to completion but must be defensive against the new value being + // incomplete. + newVal := resp.NewState + + if newVal == cty.NilVal { + // Providers are supposed to return a partial new value even when errors + // occur, but sometimes they don't and so in that case we'll patch that up + // by just using the prior state, so we'll at least keep track of the + // object for the user to retry. + newVal = change.Before + + // As a special case, we'll set the new value to null if it looks like + // we were trying to execute a delete, because the provider in this case + // probably left the newVal unset intending it to be interpreted as "null". + if change.After.IsNull() { + newVal = cty.NullVal(schema.ImpliedType()) + } + + // Ideally we'd produce an error or warning here if newVal is nil and + // there are no errors in diags, because that indicates a buggy + // provider not properly reporting its result, but unfortunately many + // of our historical test mocks behave in this way and so producing + // a diagnostic here fails hundreds of tests. Instead, we must just + // silently retain the old value for now. Returning a nil value with + // no errors is still always considered a bug in the provider though, + // and should be fixed for any "real" providers that do it. + } + + var conformDiags tfdiags.Diagnostics + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + conformDiags = conformDiags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + diags = diags.Append(conformDiags) + if conformDiags.HasErrors() { + // Bail early in this particular case, because an object that doesn't + // conform to the schema can't be saved in the state anyway -- the + // serializer will reject it. + return nil, diags.Err() + } + + // After this point we have a type-conforming result object and so we + // must always run to completion to ensure it can be saved. If n.Error + // is set then we must not return a non-nil error, in order to allow + // evaluation to continue to a later point where our state object will + // be saved. + + // By this point there must not be any unknown values remaining in our + // object, because we've applied the change and we can't save unknowns + // in our persistent state. If any are present then we will indicate an + // error (which is always a bug in the provider) but we will also replace + // them with nulls so that we can successfully save the portions of the + // returned value that are known. 
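The cleanup described above leans on go-cty's helper for converting unknown values to nulls. A tiny sketch with invented values, assuming `import "github.com/zclconf/go-cty/cty"` (already imported by this file):

    val := cty.ObjectVal(map[string]cty.Value{
        "id":   cty.UnknownVal(cty.String),
        "name": cty.StringVal("web"),
    })
    cleaned := cty.UnknownAsNull(val)
    // cleaned carries id = null while name = "web" survives, which is what
    // allows a partially-known provider result to still be saved to state.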
+ if !newVal.IsWhollyKnown() { + // To generate better error messages, we'll go for a walk through the + // value and make a separate diagnostic for each unknown value we + // find. + cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) { + if !val.IsKnown() { + pathStr := tfdiags.FormatCtyPath(path) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.", + n.Addr.Absolute(ctx.Path()), pathStr, + ), + )) + } + return true, nil + }) + + // NOTE: This operation can potentially be lossy if there are multiple + // elements in a set that differ only by unknown values: after + // replacing with null these will be merged together into a single set + // element. Since we can only get here in the presence of a provider + // bug, we accept this because storing a result here is always a + // best-effort sort of thing. + newVal = cty.UnknownAsNull(newVal) + } + + if change.Action != plans.Delete && !diags.HasErrors() { + // Only values that were marked as unknown in the planned value are allowed + // to change during the apply operation. (We do this after the unknown-ness + // check above so that we also catch anything that became unknown after + // being known during plan.) + // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. + if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. 
+ + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + } + } + } + + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like Terraform is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + } + + // Sometimes providers return a null value when an operation fails for some + // reason, but we'd rather keep the prior state so that the error can be + // corrected on a subsequent run. We must only do this for null new value + // though, or else we may discard partial updates the provider was able to + // complete. + if diags.HasErrors() && newVal.IsNull() { + // Otherwise, we'll continue but using the prior state as the new value, + // making this effectively a no-op. If the item really _has_ been + // deleted then our next refresh will detect that and fix it up. + // If change.Action is Create then change.Before will also be null, + // which is fine. + newVal = change.Before + } + + var newState *states.ResourceInstanceObject + if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case + newState = &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: newVal, + Private: resp.Private, + Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node + } + } + + // Write the final state + if n.Output != nil { + *n.Output = newState + } + + if diags.HasErrors() { + // If the caller provided an error pointer then they are expected to + // handle the error some other way and we treat our own result as + // success. 
+ if n.Error != nil { + err := diags.Err() + *n.Error = err + log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) + return nil, nil + } + } + + return nil, diags.ErrWithWarnings() +} + +// EvalApplyPre is an EvalNode implementation that does the pre-Apply work +type EvalApplyPre struct { + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { + change := *n.Change + absAddr := n.Addr.Absolute(ctx.Path()) + + if change == nil { + panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) + } + + if resourceHasUserVisibleApply(n.Addr) { + priorState := change.Before + plannedNewState := change.After + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalApplyPost is an EvalNode implementation that does the post-Apply work +type EvalApplyPost struct { + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Error *error +} + +// TODO: test +func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + + if resourceHasUserVisibleApply(n.Addr) { + absAddr := n.Addr.Absolute(ctx.Path()) + var newState cty.Value + if state != nil { + newState = state.Value + } else { + newState = cty.NullVal(cty.DynamicPseudoType) + } + var err error + if n.Error != nil { + err = *n.Error + } + + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(absAddr, n.Gen, newState, err) + }) + if hookErr != nil { + return nil, hookErr + } + } + + return nil, *n.Error +} + +// EvalMaybeTainted is an EvalNode that takes the planned change, new value, +// and possible error from an apply operation and produces a new instance +// object marked as tainted if it appears that a create operation has failed. +// +// This EvalNode never returns an error, to ensure that a subsequent EvalNode +// can still record the possibly-tainted object in the state. +type EvalMaybeTainted struct { + Addr addrs.ResourceInstance + Gen states.Generation + Change **plans.ResourceInstanceChange + State **states.ResourceInstanceObject + Error *error + + // If StateOutput is not nil, its referent will be assigned either the same + // pointer as State or a new object with its status set as Tainted, + // depending on whether an error is given and if this was a create action. + StateOutput **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + change := *n.Change + err := *n.Error + + if state != nil && state.Status == states.ObjectTainted { + log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) + return nil, nil + } + + if n.StateOutput != nil { + if err != nil && change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. 
+ // If there _were_ changes prior to the error, it's the provider's + // responsibility to record the effect of those changes in the + // object value it returned. + log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) + *n.StateOutput = state.AsTainted() + } else { + *n.StateOutput = state + } + } + + return nil, nil +} + +// resourceHasUserVisibleApply returns true if the given resource is one where +// apply actions should be exposed to the user. +// +// Certain resources do apply actions only as an implementation detail, so +// these should not be advertised to code outside of this package. +func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { + // Only managed resources have user-visible apply actions. + // In particular, this excludes data resources since we "apply" these + // only as an implementation detail of removing them from state when + // they are destroyed. (When reading, they don't get here at all because + // we present them as "Refresh" actions.) + return addr.ContainingResource().Mode == addrs.ManagedResourceMode +} + +// EvalApplyProvisioners is an EvalNode implementation that executes +// the provisioners for a resource. +// +// TODO(mitchellh): This should probably be split up into a more fine-grained +// ApplyProvisioner (single) that is looped over. +type EvalApplyProvisioners struct { + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject + ResourceConfig *configs.Resource + CreateNew *bool + Error *error + + // When is the type of provisioner to run at this point + When configs.ProvisionerWhen +} + +// TODO: test +func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := *n.State + if state == nil { + log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) + return nil, nil + } + if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { + // If we're not creating a new resource, then don't run provisioners + log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) + return nil, nil + } + if state.Status == states.ObjectTainted { + // No point in provisioning an object that is already tainted, since + // it's going to get recreated on the next apply anyway. + log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr) + return nil, nil + } + + provs := n.filterProvisioners() + if len(provs) == 0 { + // We have no provisioners, so don't do anything + return nil, nil + } + + if n.Error != nil && *n.Error != nil { + // We're already tainted, so just return out + return nil, nil + } + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstance(absAddr, state.Value) + }) + if err != nil { + return nil, err + } + } + + // If there are no errors, then we append it to our output error + // if we have one, otherwise we just output it. 
+ err := n.apply(ctx, provs) + if err != nil { + *n.Error = multierror.Append(*n.Error, err) + if n.Error == nil { + return nil, err + } else { + log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr) + return nil, nil + } + } + + { + // Call post hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstance(absAddr, state.Value) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// filterProvisioners filters the provisioners on the resource to only +// the provisioners specified by the "when" option. +func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner { + // Fast path the zero case + if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil { + return nil + } + + if len(n.ResourceConfig.Managed.Provisioners) == 0 { + return nil + } + + result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners)) + for _, p := range n.ResourceConfig.Managed.Provisioners { + if p.When == n.When { + result = append(result, p) + } + } + + return result +} + +func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error { + var diags tfdiags.Diagnostics + instanceAddr := n.Addr + absAddr := instanceAddr.Absolute(ctx.Path()) + + // If there's a connection block defined directly inside the resource block + // then it'll serve as a base connection configuration for all of the + // provisioners. + var baseConn hcl.Body + if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { + baseConn = n.ResourceConfig.Managed.Connection.Config + } + + for _, prov := range provs { + log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) + + // Get the provisioner + provisioner := ctx.Provisioner(prov.Type) + schema := ctx.ProvisionerSchema(prov.Type) + + forEach, forEachDiags := evaluateResourceForEachExpression(n.ResourceConfig.ForEach, ctx) + diags = diags.Append(forEachDiags) + keyData := EvalDataForInstanceKey(instanceAddr.Key, forEach) + + // Evaluate the main provisioner configuration. + config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) + diags = diags.Append(configDiags) + + // If the provisioner block contains a connection block of its own then + // it can override the base connection configuration, if any. + var localConn hcl.Body + if prov.Connection != nil { + localConn = prov.Connection.Config + } + + var connBody hcl.Body + switch { + case baseConn != nil && localConn != nil: + // Our standard merging logic applies here, similar to what we do + // with _override.tf configuration files: arguments from the + // base connection block will be masked by any arguments of the + // same name in the local connection block. + connBody = configs.MergeBodies(baseConn, localConn) + case baseConn != nil: + connBody = baseConn + case localConn != nil: + connBody = localConn + } + + // start with an empty connInfo + connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) + + if connBody != nil { + var connInfoDiags tfdiags.Diagnostics + connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) + diags = diags.Append(connInfoDiags) + if diags.HasErrors() { + // "on failure continue" setting only applies to failures of the + // provisioner itself, not to invalid configuration. 
+ return diags.Err() + } + } + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvisionInstanceStep(absAddr, prov.Type) + }) + if err != nil { + return err + } + } + + // The output function + outputFn := func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(absAddr, prov.Type, msg) + return HookActionContinue, nil + }) + } + + output := CallbackUIOutput{OutputFn: outputFn} + resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{ + Config: config, + Connection: connInfo, + UIOutput: &output, + }) + applyDiags := resp.Diagnostics.InConfigBody(prov.Config) + + // Call post hook + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err()) + }) + + switch prov.OnFailure { + case configs.ProvisionerOnFailureContinue: + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type) + } else { + // Maybe there are warnings that we still want to see + diags = diags.Append(applyDiags) + } + default: + diags = diags.Append(applyDiags) + if applyDiags.HasErrors() { + log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type) + return diags.Err() + } + } + + // Deal with the hook + if hookErr != nil { + return hookErr + } + } + + return diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go new file mode 100644 index 000000000..d13a96529 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_check_prevent_destroy.go @@ -0,0 +1,47 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalPreventDestroy is an EvalNode implementation that returns an +// error if a resource has PreventDestroy configured and the diff +// would destroy the resource. +type EvalCheckPreventDestroy struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Change **plans.ResourceInstanceChange +} + +func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { + if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { + return nil, nil + } + + change := *n.Change + preventDestroy := n.Config.Managed.PreventDestroy + + if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Instance cannot be destroyed", + Detail: fmt.Sprintf( + "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. 
To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", + n.Addr.Absolute(ctx.Path()).String(), + ), + Subject: &n.Config.DeclRange, + }) + return nil, diags.Err() + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go new file mode 100644 index 000000000..4fa011e2b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context.go @@ -0,0 +1,133 @@ +package terraform + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// EvalContext is the interface that is given to eval nodes to execute. +type EvalContext interface { + // Stopped returns a channel that is closed when evaluation is stopped + // via Terraform.Context.Stop() + Stopped() <-chan struct{} + + // Path is the current module path. + Path() addrs.ModuleInstance + + // Hook is used to call hook methods. The callback is called for each + // hook and should return the hook action to take and the error. + Hook(func(Hook) (HookAction, error)) error + + // Input is the UIInput object for interacting with the UI. + Input() UIInput + + // InitProvider initializes the provider with the given type and address, and + // returns the implementation of the resource provider or an error. + // + // It is an error to initialize the same provider more than once. + InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) + + // Provider gets the provider instance with the given address (already + // initialized) or returns nil if the provider isn't initialized. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + // InitProvider must've been called on the EvalContext of the module + // that owns the given provider before calling this method. + Provider(addrs.AbsProviderConfig) providers.Interface + + // ProviderSchema retrieves the schema for a particular provider, which + // must have already been initialized with InitProvider. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema + + // CloseProvider closes provider connections that aren't needed anymore. + CloseProvider(addrs.ProviderConfig) error + + // ConfigureProvider configures the provider with the given + // configuration. This is a separate context call because this call + // is used to store the provider configuration for inheritance lookups + // with ParentProviderConfig(). + ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics + + // ProviderInput and SetProviderInput are used to configure providers + // from user input. 
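// As a rough sketch of how the provider-related methods above are meant to be
// used together during a graph walk (hypothetical variables evalCtx,
// providerAddr and configVal; the real call sites live elsewhere in this
// package):
//
//	p, err := evalCtx.InitProvider("aws", providerAddr) // once per provider configuration
//	// ...evaluate the provider block, then:
//	diags := evalCtx.ConfigureProvider(providerAddr, configVal)
//	// ...later lookups use the absolute address:
//	p = evalCtx.Provider(providerAddr.Absolute(evalCtx.Path()))
//	defer evalCtx.CloseProvider(providerAddr)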
+ ProviderInput(addrs.ProviderConfig) map[string]cty.Value + SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) + + // InitProvisioner initializes the provisioner with the given name and + // returns the implementation of the resource provisioner or an error. + // + // It is an error to initialize the same provisioner more than once. + InitProvisioner(string) (provisioners.Interface, error) + + // Provisioner gets the provisioner instance with the given name (already + // initialized) or returns nil if the provisioner isn't initialized. + Provisioner(string) provisioners.Interface + + // ProvisionerSchema retrieves the main configuration schema for a + // particular provisioner, which must have already been initialized with + // InitProvisioner. + ProvisionerSchema(string) *configschema.Block + + // CloseProvisioner closes provisioner connections that aren't needed + // anymore. + CloseProvisioner(string) error + + // EvaluateBlock takes the given raw configuration block and associated + // schema and evaluates it to produce a value of an object type that + // conforms to the implied type of the schema. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + // + // The "key" argument is also optional. If given, it is the instance key + // of the current object within the multi-instance container it belongs + // to. For example, on a resource block with "count" set this should be + // set to a different addrs.IntKey for each instance created from that + // block. Set this to addrs.NoKey if not appropriate. + // + // The returned body is an expanded version of the given body, with any + // "dynamic" blocks replaced with zero or more static blocks. This can be + // used to extract correct source location information about attributes of + // the returned object value. + EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) + + // EvaluateExpr takes the given HCL expression and evaluates it to produce + // a value. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) + + // EvaluationScope returns a scope that can be used to evaluate reference + // addresses in this context. + EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope + + // SetModuleCallArguments defines values for the variables of a particular + // child module call. + // + // Calling this function multiple times has merging behavior, keeping any + // previously-set keys that are not present in the new map. + SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) + + // Changes returns the writer object that can be used to write new proposed + // changes into the global changes set. + Changes() *plans.ChangesSync + + // State returns a wrapper object that provides safe concurrent access to + // the global state. 
+ State() *states.SyncState +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go new file mode 100644 index 000000000..bd414a960 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_builtin.go @@ -0,0 +1,329 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// BuiltinEvalContext is an EvalContext implementation that is used by +// Terraform by default. +type BuiltinEvalContext struct { + // StopContext is the context used to track whether we're complete + StopContext context.Context + + // PathValue is the Path that this context is operating within. + PathValue addrs.ModuleInstance + + // Evaluator is used for evaluating expressions within the scope of this + // eval context. + Evaluator *Evaluator + + // Schemas is a repository of all of the schemas we should need to + // decode configuration blocks and expressions. This must be constructed by + // the caller to include schemas for all of the providers, resource types, + // data sources and provisioners used by the given configuration and + // state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // VariableValues contains the variable values across all modules. This + // structure is shared across the entire containing context, and so it + // may be accessed only when holding VariableValuesLock. + // The keys of the first level of VariableValues are the string + // representations of addrs.ModuleInstance values. The second-level keys + // are variable names within each module instance. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + Components contextComponentFactory + Hooks []Hook + InputValue UIInput + ProviderCache map[string]providers.Interface + ProviderInputConfig map[string]map[string]cty.Value + ProviderLock *sync.Mutex + ProvisionerCache map[string]provisioners.Interface + ProvisionerLock *sync.Mutex + ChangesValue *plans.ChangesSync + StateValue *states.SyncState + + once sync.Once +} + +// BuiltinEvalContext implements EvalContext +var _ EvalContext = (*BuiltinEvalContext)(nil) + +func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { + // This can happen during tests. During tests, we just block forever. 
+ if ctx.StopContext == nil { + return nil + } + + return ctx.StopContext.Done() +} + +func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + for _, h := range ctx.Hooks { + action, err := fn(h) + if err != nil { + return err + } + + switch action { + case HookActionContinue: + continue + case HookActionHalt: + // Return an early exit error to trigger an early exit + log.Printf("[WARN] Early exit triggered by hook: %T", h) + return EvalEarlyExitError{} + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Input() UIInput { + return ctx.InputValue +} + +func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { + ctx.once.Do(ctx.init) + absAddr := addr.Absolute(ctx.Path()) + + // If we already initialized, it is an error + if p := ctx.Provider(absAddr); p != nil { + return nil, fmt.Errorf("%s is already initialized", addr) + } + + // Warning: make sure to acquire these locks AFTER the call to Provider + // above, since it also acquires locks. + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := absAddr.String() + + p, err := ctx.Components.ResourceProvider(typeName, key) + if err != nil { + return nil, err + } + + log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) + ctx.ProviderCache[key] = p + + return p, nil +} + +func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + return ctx.ProviderCache[addr.String()] +} + +func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + ctx.once.Do(ctx.init) + + return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) +} + +func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + key := addr.Absolute(ctx.Path()).String() + provider := ctx.ProviderCache[key] + if provider != nil { + delete(ctx.ProviderCache, key) + return provider.Close() + } + + return nil +} + +func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + absAddr := addr.Absolute(ctx.Path()) + p := ctx.Provider(absAddr) + if p == nil { + diags = diags.Append(fmt.Errorf("%s not initialized", addr)) + return diags + } + + providerSchema := ctx.ProviderSchema(absAddr) + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) + return diags + } + + req := providers.ConfigureRequest{ + TerraformVersion: version.String(), + Config: cfg, + } + + resp := p.Configure(req) + return resp.Diagnostics +} + +func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + return nil + } + + return ctx.ProviderInputConfig[pc.String()] +} + +func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { + absProvider := pc.Absolute(ctx.Path()) + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. 
+ log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") + return + } + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderInputConfig[absProvider.String()] = c + ctx.ProviderLock.Unlock() +} + +func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provisioner(n); p != nil { + return nil, fmt.Errorf("Provisioner '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provisioner + // above, since it also acquires locks. + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + + p, err := ctx.Components.ResourceProvisioner(n, key) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[key] = p + + return p, nil +} + +func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + return ctx.ProvisionerCache[key] +} + +func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { + ctx.once.Do(ctx.init) + + return ctx.Schemas.ProvisionerConfig(n) +} + +func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + key := PathObjectCacheKey(ctx.Path(), n) + + prov := ctx.ProvisionerCache[key] + if prov != nil { + return prov.Close() + } + + return nil +} + +func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + scope := ctx.EvaluationScope(self, keyData) + body, evalDiags := scope.ExpandBlock(body, schema) + diags = diags.Append(evalDiags) + val, evalDiags := scope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + return val, body, diags +} + +func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) + return scope.EvalExpr(expr, wantType) +} + +func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + data := &evaluationStateData{ + Evaluator: ctx.Evaluator, + ModulePath: ctx.PathValue, + InstanceKeyData: keyData, + Operation: ctx.Evaluator.Operation, + } + return ctx.Evaluator.Scope(data, self) +} + +func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { + return ctx.PathValue +} + +func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() + + childPath := n.ModuleInstance(ctx.PathValue) + key := childPath.String() + + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = vals + return + } + + for k, v := range vals { + args[k] = v + } +} + +func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { + return ctx.ChangesValue +} + +func (ctx *BuiltinEvalContext) State() *states.SyncState { + return ctx.StateValue +} + +func (ctx *BuiltinEvalContext) init() { +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go 
b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go new file mode 100644 index 000000000..786316fb3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_context_mock.go @@ -0,0 +1,319 @@ +package terraform + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// MockEvalContext is a mock version of EvalContext that can be used +// for tests. +type MockEvalContext struct { + StoppedCalled bool + StoppedValue <-chan struct{} + + HookCalled bool + HookHook Hook + HookError error + + InputCalled bool + InputInput UIInput + + InitProviderCalled bool + InitProviderType string + InitProviderAddr addrs.ProviderConfig + InitProviderProvider providers.Interface + InitProviderError error + + ProviderCalled bool + ProviderAddr addrs.AbsProviderConfig + ProviderProvider providers.Interface + + ProviderSchemaCalled bool + ProviderSchemaAddr addrs.AbsProviderConfig + ProviderSchemaSchema *ProviderSchema + + CloseProviderCalled bool + CloseProviderAddr addrs.ProviderConfig + CloseProviderProvider providers.Interface + + ProviderInputCalled bool + ProviderInputAddr addrs.ProviderConfig + ProviderInputValues map[string]cty.Value + + SetProviderInputCalled bool + SetProviderInputAddr addrs.ProviderConfig + SetProviderInputValues map[string]cty.Value + + ConfigureProviderCalled bool + ConfigureProviderAddr addrs.ProviderConfig + ConfigureProviderConfig cty.Value + ConfigureProviderDiags tfdiags.Diagnostics + + InitProvisionerCalled bool + InitProvisionerName string + InitProvisionerProvisioner provisioners.Interface + InitProvisionerError error + + ProvisionerCalled bool + ProvisionerName string + ProvisionerProvisioner provisioners.Interface + + ProvisionerSchemaCalled bool + ProvisionerSchemaName string + ProvisionerSchemaSchema *configschema.Block + + CloseProvisionerCalled bool + CloseProvisionerName string + CloseProvisionerProvisioner provisioners.Interface + + EvaluateBlockCalled bool + EvaluateBlockBody hcl.Body + EvaluateBlockSchema *configschema.Block + EvaluateBlockSelf addrs.Referenceable + EvaluateBlockKeyData InstanceKeyEvalData + EvaluateBlockResultFunc func( + body hcl.Body, + schema *configschema.Block, + self addrs.Referenceable, + keyData InstanceKeyEvalData, + ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateBlockResult cty.Value + EvaluateBlockExpandedBody hcl.Body + EvaluateBlockDiags tfdiags.Diagnostics + + EvaluateExprCalled bool + EvaluateExprExpr hcl.Expression + EvaluateExprWantType cty.Type + EvaluateExprSelf addrs.Referenceable + EvaluateExprResultFunc func( + expr hcl.Expression, + wantType cty.Type, + self addrs.Referenceable, + ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateExprResult cty.Value + EvaluateExprDiags tfdiags.Diagnostics + + EvaluationScopeCalled bool + EvaluationScopeSelf addrs.Referenceable + 
EvaluationScopeKeyData InstanceKeyEvalData + EvaluationScopeScope *lang.Scope + + PathCalled bool + PathPath addrs.ModuleInstance + + SetModuleCallArgumentsCalled bool + SetModuleCallArgumentsModule addrs.ModuleCallInstance + SetModuleCallArgumentsValues map[string]cty.Value + + ChangesCalled bool + ChangesChanges *plans.ChangesSync + + StateCalled bool + StateState *states.SyncState +} + +// MockEvalContext implements EvalContext +var _ EvalContext = (*MockEvalContext)(nil) + +func (c *MockEvalContext) Stopped() <-chan struct{} { + c.StoppedCalled = true + return c.StoppedValue +} + +func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + c.HookCalled = true + if c.HookHook != nil { + if _, err := fn(c.HookHook); err != nil { + return err + } + } + + return c.HookError +} + +func (c *MockEvalContext) Input() UIInput { + c.InputCalled = true + return c.InputInput +} + +func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { + c.InitProviderCalled = true + c.InitProviderType = t + c.InitProviderAddr = addr + return c.InitProviderProvider, c.InitProviderError +} + +func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { + c.ProviderCalled = true + c.ProviderAddr = addr + return c.ProviderProvider +} + +func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + c.ProviderSchemaCalled = true + c.ProviderSchemaAddr = addr + return c.ProviderSchemaSchema +} + +func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { + c.CloseProviderCalled = true + c.CloseProviderAddr = addr + return nil +} + +func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + c.ConfigureProviderCalled = true + c.ConfigureProviderAddr = addr + c.ConfigureProviderConfig = cfg + return c.ConfigureProviderDiags +} + +func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { + c.ProviderInputCalled = true + c.ProviderInputAddr = addr + return c.ProviderInputValues +} + +func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { + c.SetProviderInputCalled = true + c.SetProviderInputAddr = addr + c.SetProviderInputValues = vals +} + +func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { + c.InitProvisionerCalled = true + c.InitProvisionerName = n + return c.InitProvisionerProvisioner, c.InitProvisionerError +} + +func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { + c.ProvisionerCalled = true + c.ProvisionerName = n + return c.ProvisionerProvisioner +} + +func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { + c.ProvisionerSchemaCalled = true + c.ProvisionerSchemaName = n + return c.ProvisionerSchemaSchema +} + +func (c *MockEvalContext) CloseProvisioner(n string) error { + c.CloseProvisionerCalled = true + c.CloseProvisionerName = n + return nil +} + +func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + c.EvaluateBlockCalled = true + c.EvaluateBlockBody = body + c.EvaluateBlockSchema = schema + c.EvaluateBlockSelf = self + c.EvaluateBlockKeyData = keyData + if c.EvaluateBlockResultFunc != nil { + return c.EvaluateBlockResultFunc(body, schema, self, keyData) + } + return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags +} + +func 
(c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + c.EvaluateExprCalled = true + c.EvaluateExprExpr = expr + c.EvaluateExprWantType = wantType + c.EvaluateExprSelf = self + if c.EvaluateExprResultFunc != nil { + return c.EvaluateExprResultFunc(expr, wantType, self) + } + return c.EvaluateExprResult, c.EvaluateExprDiags +} + +// installSimpleEval is a helper to install a simple mock implementation of +// both EvaluateBlock and EvaluateExpr into the receiver. +// +// These default implementations will either evaluate the given input against +// the scope in field EvaluationScopeScope or, if it is nil, with no eval +// context at all so that only constant values may be used. +// +// This function overwrites any existing functions installed in fields +// EvaluateBlockResultFunc and EvaluateExprResultFunc. +func (c *MockEvalContext) installSimpleEval() { + c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + var diags tfdiags.Diagnostics + body, diags = scope.ExpandBlock(body, schema) + if diags.HasErrors() { + return cty.DynamicVal, body, diags + } + val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return cty.DynamicVal, body, diags + } + return val, body, diags + } + + // Fallback codepath supporting constant values only. + val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) + return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) + } + c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + return scope.EvalExpr(expr, wantType) + } + + // Fallback codepath supporting constant values only. 
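// "Constant values only" means the expression is evaluated against a nil
// hcl.EvalContext, so literals and simple arithmetic succeed while references
// such as var.foo or aws_instance.bar.id return errors. A minimal sketch of a
// test relying on this fallback (hypothetical test code, not part of this file):
//
//	mock := &MockEvalContext{}
//	mock.installSimpleEval()
//	expr, _ := hclsyntax.ParseExpression([]byte(`1 + 2`), "test.tf", hcl.Pos{Line: 1, Column: 1})
//	v, diags := mock.EvaluateExpr(expr, cty.Number, nil)
//	// v is cty.NumberIntVal(3) and diags has no errors.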
+ var diags tfdiags.Diagnostics + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(err) + return cty.DynamicVal, diags + } + return val, diags + } +} + +func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + c.EvaluationScopeCalled = true + c.EvaluationScopeSelf = self + c.EvaluationScopeKeyData = keyData + return c.EvaluationScopeScope +} + +func (c *MockEvalContext) Path() addrs.ModuleInstance { + c.PathCalled = true + return c.PathPath +} + +func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) { + c.SetModuleCallArgumentsCalled = true + c.SetModuleCallArgumentsModule = n + c.SetModuleCallArgumentsValues = values +} + +func (c *MockEvalContext) Changes() *plans.ChangesSync { + c.ChangesCalled = true + return c.ChangesChanges +} + +func (c *MockEvalContext) State() *states.SyncState { + c.StateCalled = true + return c.StateState +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go new file mode 100644 index 000000000..7d6fa4919 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count.go @@ -0,0 +1,120 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" +) + +// evaluateResourceCountExpression is our standard mechanism for interpreting an +// expression given for a "count" argument on a resource. This should be called +// from the DynamicExpand of a node representing a resource in order to +// determine the final count value. +// +// If the result is zero or positive and no error diagnostics are returned, then +// the result is the literal count value to use. +// +// If the result is -1, this indicates that the given expression is nil and so +// the "count" behavior should not be enabled for this resource at all. +// +// If error diagnostics are returned then the result is always the meaningless +// placeholder value -1. +func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { + count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) + if !known { + // Currently this is a rather bad outcome from a UX standpoint, since we have + // no real mechanism to deal with this situation and all we can do is produce + // an error message. + // FIXME: In future, implement a built-in mechanism for deferring changes that + // can't yet be predicted, and use it to guide the user through several + // plan/apply steps until the desired configuration is eventually reached. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. 
To work around this, use the -target argument to first apply only the resources that the count depends on.`, + Subject: expr.Range().Ptr(), + }) + } + return count, diags +} + +// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression +// except that it handles an unknown result by returning count = 0 and +// a known = false, rather than by reporting the unknown value as an error +// diagnostic. +func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { + if expr == nil { + return -1, true, nil + } + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return -1, true, diags + } + + switch { + case countVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + case !countVal.IsKnown(): + return 0, false, diags + } + + err := gocty.FromCtyValue(countVal, &count) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + } + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + } + + return count, true, diags +} + +// fixResourceCountSetTransition is a helper function to fix up the state when a +// resource transitions its "count" from being set to unset or vice-versa, +// treating a 0-key and a no-key instance as aliases for one another across +// the transition. +// +// The correct time to call this function is in the DynamicExpand method for +// a node representing a resource, just after evaluating the count with +// evaluateResourceCountExpression, and before any other analysis of the +// state such as orphan detection. +// +// This function calls methods on the given EvalContext to update the current +// state in-place, if necessary. It is a no-op if there is no count transition +// taking place. +// +// Since the state is modified in-place, this function must take a writer lock +// on the state. The caller must therefore not also be holding a state lock, +// or this function will block forever awaiting the lock. 
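// As a concrete illustration of that aliasing (hypothetical addresses): when
// "count = 1" is added to a resource that previously had no count, its single
// instance moves from the no-key address to the zero-key address, and the
// existing state entry is renamed rather than treated as a new instance.
//
//	noKey := addr.Instance(addrs.NoKey)    // aws_instance.foo
//	zero := addr.Instance(addrs.IntKey(0)) // aws_instance.foo[0]
//	// MaybeFixUpResourceInstanceAddressForCount(addr, true) renames noKey to
//	// zero in the state; calling it with false performs the reverse rename.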
+func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { + state := ctx.State() + changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) + if changed { + log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go new file mode 100644 index 000000000..aac380632 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_count_boundary.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state +// when there is a resource count with zero/one boundary, i.e. fixing +// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. +// +// This works on the global state. +type EvalCountFixZeroOneBoundaryGlobal struct { + Config *configs.Config +} + +// TODO: test +func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { + // We'll temporarily lock the state to grab the modules, then work on each + // one separately while taking a lock again for each separate resource. + // This means that if another caller concurrently adds a module here while + // we're working then we won't update it, but that's no worse than the + // concurrent writer blocking for our entire fixup process and _then_ + // adding a new module, and in practice the graph node associated with + // this eval depends on everything else in the graph anyway, so there + // should not be concurrent writers. + state := ctx.State().Lock() + moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) + for _, m := range state.Modules { + moduleAddrs = append(moduleAddrs, m.Addr) + } + ctx.State().Unlock() + + for _, addr := range moduleAddrs { + cfg := n.Config.DescendentForInstance(addr) + if cfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + if err := n.fixModule(ctx, addr); err != nil { + return nil, err + } + } + + return nil, nil +} + +func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { + ms := ctx.State().Module(moduleAddr) + cfg := n.Config.DescendentForInstance(moduleAddr) + if ms == nil { + // Theoretically possible for a concurrent writer to delete a module + // while we're running, but in practice the graph node that called us + // depends on everything else in the graph and so there can never + // be a concurrent writer. 
+ return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) + } + if cfg == nil { + return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) + } + + for _, r := range ms.Resources { + addr := r.Addr.Absolute(moduleAddr) + rCfg := cfg.Module.ResourceByAddr(r.Addr) + if rCfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + hasCount := rCfg.Count != nil + fixResourceCountSetTransition(ctx, addr, hasCount) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go new file mode 100644 index 000000000..d6f51c950 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_diff.go @@ -0,0 +1,783 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalCheckPlannedChange is an EvalNode implementation that produces errors +// if the _actual_ expected value is not compatible with what was recorded +// in the plan. +// +// Errors here are most often indicative of a bug in the provider, so our +// error messages will report with that in mind. It's also possible that +// there's a bug in Terraform's Core's own "proposed new value" code in +// EvalDiff. +type EvalCheckPlannedChange struct { + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // We take ResourceInstanceChange objects here just because that's what's + // convenient to pass in from the evaltree implementation, but we really + // only look at the "After" value of each change. + Planned, Actual **plans.ResourceInstanceChange +} + +func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + plannedChange := *n.Planned + actualChange := *n.Actual + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) + } + + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all. 
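// For example (hypothetical values): the plan may have recorded an Update only
// because a computed upstream value was still unknown, and the final value can
// turn out to match the prior state:
//
//	// plan:  ami = (known after apply)           => plans.Update
//	// apply: ami = "ami-0abc1234", same as prior => plans.NoOp (tolerated here)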
+ log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, + plannedChange.Action, actualChange.Action, + ), + )) + } + } + + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + return nil, diags.Err() +} + +// EvalDiff is an EvalNode implementation that detects changes for a given +// resource instance. +type EvalDiff struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + PreviousDiff **plans.ResourceInstanceChange + + // CreateBeforeDestroy is set if either the resource's own config sets + // create_before_destroy explicitly or if dependencies have forced the + // resource to be handled as create_before_destroy in order to avoid + // a dependency cycle. 
+ CreateBeforeDestroy bool + + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputState **states.ResourceInstanceObject + + Stub bool +} + +// TODO: test +func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + + if providerSchema == nil { + return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) + } + if n.ProviderAddr.ProviderConfig.Type == "" { + panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) + } + + var diags tfdiags.Diagnostics + + // Evaluate the configuration + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + absAddr := n.Addr.Absolute(ctx.Path()) + var priorVal cty.Value + var priorValTainted cty.Value + var priorPrivate []byte + if state != nil { + if state.Status != states.ObjectTainted { + priorVal = state.Value + priorPrivate = state.Private + } else { + // If the prior state is tainted then we'll proceed below like + // we're creating an entirely new object, but then turn it into + // a synthetic "Replace" change at the end, creating the same + // result as if the provider had marked at least one argument + // change as "requires replacement". + priorValTainted = state.Value + priorVal = cty.NullVal(schema.ImpliedType()) + } + } else { + priorVal = cty.NullVal(schema.ImpliedType()) + } + + proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) + + // Call pre-diff hook + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + } + + log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) + // Allow the provider to validate the final set of values. + // The config was statically validated early on, but there may have been + // unknown values which the provider could not validate at the time. + validateResp := provider.ValidateResourceTypeConfig( + providers.ValidateResourceTypeConfigRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() + } + + // The provider gets an opportunity to customize the proposed new value, + // which in turn produces the _planned_ new value. 
But before + // we send back this information, we need to process ignore_changes + // so that CustomizeDiff will not act on them + var ignoreChangeDiags tfdiags.Diagnostics + proposedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, proposedNewVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return nil, diags.Err() + } + + resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: priorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: priorPrivate, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + plannedNewVal := resp.PlannedState + plannedPrivate := resp.PlannedPrivate + + if plannedNewVal == cty.NilVal { + // Should never happen. Since real-world providers return via RPC a nil + // is always a bug in the client-side stub. This is more likely caused + // by an incompletely-configured mock provider in tests, though. + panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) + } + + // We allow the planned new value to disagree with configuration _values_ + // here, since that allows the provider to do special logic like a + // DiffSuppressFunc, but we still require that the provider produces + // a value whose type conforms to the schema. + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. 
+ var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + return nil, diags.Err() + } + } + + // TODO: We should be able to remove this repeat of processing ignored changes + // after the plan, which helps providers relying on old behavior "just work" + // in the next major version, such that we can be stricter about ignore_changes + // values + plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(priorVal, plannedNewVal) + diags = diags.Append(ignoreChangeDiags) + if ignoreChangeDiags.HasErrors() { + return nil, diags.Err() + } + + // The provider produces a list of paths to attributes whose changes mean + // that we must replace rather than update an existing remote object. + // However, we only need to do that if the identified attributes _have_ + // actually changed -- particularly after we may have undone some of the + // changes in processIgnoreChanges -- so now we'll filter that list to + // include only where changes are detected. + reqRep := cty.NewPathSet() + if len(resp.RequiresReplace) > 0 { + for _, path := range resp.RequiresReplace { + if priorVal.IsNull() { + // If prior is null then we don't expect any RequiresReplace at all, + // because this is a Create action. + continue + } + + priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil) + plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil) + if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() { + // This means the path was invalid in both the prior and new + // values, which is an error with the provider itself. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, path, + ), + )) + continue + } + + // Make sure we have valid Values for both values. + // Note: if the opposing value was of the type + // cty.DynamicPseudoType, the type assigned here may not exactly + // match the schema. This is fine here, since we're only going to + // check for equality, but if the NullVal is to be used, we need to + // check the schema for th true type. 
+ switch { + case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal: + // this should never happen without ApplyPath errors above + panic("requires replace path returned 2 nil values") + case priorChangedVal == cty.NilVal: + priorChangedVal = cty.NullVal(plannedChangedVal.Type()) + case plannedChangedVal == cty.NilVal: + plannedChangedVal = cty.NullVal(priorChangedVal.Type()) + } + + eqV := plannedChangedVal.Equals(priorChangedVal) + if !eqV.IsKnown() || eqV.False() { + reqRep.Add(path) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } + } + + eqV := plannedNewVal.Equals(priorVal) + eq := eqV.IsKnown() && eqV.True() + + var action plans.Action + switch { + case priorVal.IsNull(): + action = plans.Create + case eq: + action = plans.NoOp + case !reqRep.Empty(): + // If there are any "requires replace" paths left _after our filtering + // above_ then this is a replace action. + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + default: + action = plans.Update + // "Delete" is never chosen here, because deletion plans are always + // created more directly elsewhere, such as in "orphan" handling. + } + + if action.IsReplace() { + // In this strange situation we want to produce a change object that + // shows our real prior object but has a _new_ object that is built + // from a null prior object, since we're going to delete the one + // that has all the computed values on it. + // + // Therefore we'll ask the provider to plan again here, giving it + // a null object for the prior, and then we'll meld that with the + // _actual_ prior state to produce a correctly-shaped replace change. + // The resulting change should show any computed attributes changing + // from known prior values to unknown values, unless the provider is + // able to predict new values for any of these computed attributes. + nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: nullPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: plannedPrivate, + }) + // We need to tread carefully here, since if there are any warnings + // in here they probably also came out of our previous call to + // PlanResourceChange above, and so we don't want to repeat them. + // Consequently, we break from the usual pattern here and only + // append these new diagnostics if there's at least one error inside. + if resp.Diagnostics.HasErrors() { + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + return nil, diags.Err() + } + plannedNewVal = resp.PlannedState + plannedPrivate = resp.PlannedPrivate + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + } + + // If our prior value was tainted then we actually want this to appear + // as a replace change, even though so far we've been treating it as a + // create. 
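// Summarizing the action selection above and the adjustment below (an informal
// sketch; the code itself is authoritative):
//
//	prior object is null                       => plans.Create
//	planned value equals prior                 => plans.NoOp
//	a requires-replace path actually changed   => plans.CreateThenDelete or
//	                                              plans.DeleteThenCreate,
//	                                              per CreateBeforeDestroy
//	anything else                              => plans.Update
//	prior object was tainted                   => the Create chosen above is
//	                                              promoted to a replace below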
+ if action == plans.Create && priorValTainted != cty.NilVal { + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + priorVal = priorValTainted + } + + // As a special case, if we have a previous diff (presumably from the plan + // phases, whereas we're now in the apply phase) and it was for a replace, + // we've already deleted the original object from state by the time we + // get here and so we would've ended up with a _create_ action this time, + // which we now need to paper over to get a result consistent with what + // we originally intended. + if n.PreviousDiff != nil { + prevChange := *n.PreviousDiff + if prevChange.Action.IsReplace() && action == plans.Create { + log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) + action = prevChange.Action + priorVal = prevChange.Before + } + } + + // Call post-refresh hook + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) + }) + if err != nil { + return nil, err + } + } + + // Update our output if we care + if n.OutputChange != nil { + *n.OutputChange = &plans.ResourceInstanceChange{ + Addr: absAddr, + Private: plannedPrivate, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: action, + Before: priorVal, + After: plannedNewVal, + }, + RequiredReplace: reqRep, + } + } + + if n.OutputValue != nil { + *n.OutputValue = configVal + } + + // Update the state if we care + if n.OutputState != nil { + *n.OutputState = &states.ResourceInstanceObject{ + // We use the special "planned" status here to note that this + // object's value is not yet complete. Objects with this status + // cannot be used during expression evaluation, so the caller + // must _also_ record the returned change in the active plan, + // which the expression evaluator will use in preference to this + // incomplete value recorded in the state. + Status: states.ObjectPlanned, + Value: plannedNewVal, + Private: plannedPrivate, + } + } + + return nil, nil +} + +func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { + // ignore_changes only applies when an object already exists, since we + // can't ignore changes to a thing we've not created yet. + if prior.IsNull() { + return proposed, nil + } + + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges + + if len(ignoreChanges) == 0 && !ignoreAll { + return proposed, nil + } + if ignoreAll { + return prior, nil + } + if prior.IsNull() || proposed.IsNull() { + // Ignore changes doesn't apply when we're creating for the first time. + // Proposed should never be null here, but if it is then we'll just let it be. + return proposed, nil + } + + return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) +} + +func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { + // When we walk below we will be using cty.Path values for comparison, so + // we'll convert our traversals here so we can compare more easily. 
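// For example (hypothetical configuration): an ignore_changes entry written as
// tags["Name"] arrives here as an hcl.Traversal and becomes the equivalent
// cty.Path, so it can be matched against the paths visited by cty.Transform
// below:
//
//	// ignore_changes = [ tags["Name"] ]
//	cty.GetAttrPath("tags").Index(cty.StringVal("Name"))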
+ ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) + for i, traversal := range ignoreChanges { + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + ignoreChangesPath[i] = path + } + + var diags tfdiags.Diagnostics + ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { + // First we must see if this is a path that's being ignored at all. + // We're looking for an exact match here because this walk will visit + // leaf values first and then their containers, and we want to do + // the "ignore" transform once we reach the point indicated, throwing + // away any deeper values we already produced at that point. + var ignoreTraversal hcl.Traversal + for i, candidate := range ignoreChangesPath { + if path.Equals(candidate) { + ignoreTraversal = ignoreChanges[i] + } + } + if ignoreTraversal == nil { + return v, nil + } + + // If we're able to follow the same path through the prior value, + // we'll take the value there instead, effectively undoing the + // change that was planned. + priorV, diags := hcl.ApplyPath(prior, path, nil) + if diags.HasErrors() { + // We just ignore the errors and move on here, since we assume it's + // just because the prior value was a slightly-different shape. + // It could potentially also be that the traversal doesn't match + // the schema, but we should've caught that during the validate + // walk if so. + return v, nil + } + return priorV, nil + }) + return ret, diags +} + +// EvalDiffDestroy is an EvalNode implementation that returns a plain +// destroy diff. +type EvalDiffDestroy struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + State **states.ResourceInstanceObject + ProviderAddr addrs.AbsProviderConfig + + Output **plans.ResourceInstanceChange + OutputState **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := *n.State + + if n.ProviderAddr.ProviderConfig.Type == "" { + if n.DeposedKey == "" { + panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) + } else { + panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) + } + } + + // If there is no state or our attributes object is null then we're already + // destroyed. + if state == nil || state.Value.IsNull() { + return nil, nil + } + + // Call pre-diff hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff( + absAddr, n.DeposedKey.Generation(), + state.Value, + cty.NullVal(cty.DynamicPseudoType), + ) + }) + if err != nil { + return nil, err + } + + // Change is always the same for a destroy. We don't need the provider's + // help for this one. + // TODO: Should we give the provider an opportunity to veto this? 
+ change := &plans.ResourceInstanceChange{ + Addr: absAddr, + DeposedKey: n.DeposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: state.Value, + After: cty.NullVal(cty.DynamicPseudoType), + }, + Private: state.Private, + ProviderAddr: n.ProviderAddr, + } + + // Call post-diff hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff( + absAddr, + n.DeposedKey.Generation(), + change.Action, + change.Before, + change.After, + ) + }) + if err != nil { + return nil, err + } + + // Update our output + *n.Output = change + + if n.OutputState != nil { + // Record our proposed new state, which is nil because we're destroying. + *n.OutputState = nil + } + + return nil, nil +} + +// EvalReduceDiff is an EvalNode implementation that takes a planned resource +// instance change as might be produced by EvalDiff or EvalDiffDestroy and +// "simplifies" it to a single atomic action to be performed by a specific +// graph node. +// +// Callers must specify whether they are a destroy node or a regular apply +// node. If the result is NoOp then the given change requires no action for +// the specific graph node calling this and so evaluation of the that graph +// node should exit early and take no action. +// +// The object written to OutChange may either be identical to InChange or +// a new change object derived from InChange. Because of the former case, the +// caller must not mutate the object returned in OutChange. +type EvalReduceDiff struct { + Addr addrs.ResourceInstance + InChange **plans.ResourceInstanceChange + Destroy bool + OutChange **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) { + in := *n.InChange + out := in.Simplify(n.Destroy) + if n.OutChange != nil { + *n.OutChange = out + } + if out.Action != in.Action { + if n.Destroy { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action) + } else { + log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action) + } + } + return nil, nil +} + +// EvalReadDiff is an EvalNode implementation that retrieves the planned +// change for a particular resource instance object. 
+type EvalReadDiff struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange +} + +func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + csrc := changes.GetResourceInstanceChange(addr, gen) + if csrc == nil { + log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) + return nil, nil + } + + change, err := csrc.Decode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) + } + if n.Change != nil { + *n.Change = change + } + + log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) + + return nil, nil +} + +// EvalWriteDiff is an EvalNode implementation that saves a planned change +// for an instance object into the set of global planned changes. +type EvalWriteDiff struct { + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange +} + +// TODO: test +func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + if n.Change == nil || *n.Change == nil { + // Caller sets nil to indicate that we need to remove a change from + // the set of changes. + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + changes.RemoveResourceInstanceChange(addr, gen) + return nil, nil + } + + providerSchema := *n.ProviderSchema + change := *n.Change + + if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { + // Should never happen, and indicates a bug in the caller. + panic("inconsistent address and/or deposed key in EvalWriteDiff") + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + csrc, err := change.Encode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) + } + + changes.AppendResourceInstanceChange(csrc) + if n.DeposedKey == states.NotDeposed { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) + } else { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go new file mode 100644 index 000000000..470f798b7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_error.go @@ -0,0 +1,20 @@ +package terraform + +// EvalReturnError is an EvalNode implementation that returns an +// error if it is present. 
+// +// This is useful for scenarios where an error has been captured by +// another EvalNode (like EvalApply) for special EvalTree-based error +// handling, and that handling has completed, so the error should be +// returned normally. +type EvalReturnError struct { + Error *error +} + +func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) { + if n.Error == nil { + return nil, nil + } + + return nil, *n.Error +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go new file mode 100644 index 000000000..711c625c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter.go @@ -0,0 +1,25 @@ +package terraform + +// EvalNodeFilterFunc is the callback used to replace a node with +// another to node. To not do the replacement, just return the input node. +type EvalNodeFilterFunc func(EvalNode) EvalNode + +// EvalNodeFilterable is an interface that can be implemented by +// EvalNodes to allow filtering of sub-elements. Note that this isn't +// a common thing to implement and you probably don't need it. +type EvalNodeFilterable interface { + EvalNode + Filter(EvalNodeFilterFunc) +} + +// EvalFilter runs the filter on the given node and returns the +// final filtered value. This should be called rather than checking +// the EvalNode directly since this will properly handle EvalNodeFilterables. +func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode { + if f, ok := node.(EvalNodeFilterable); ok { + f.Filter(fn) + return node + } + + return fn(node) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go new file mode 100644 index 000000000..1a55f024a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_filter_operation.go @@ -0,0 +1,49 @@ +package terraform + +// EvalNodeOpFilterable is an interface that EvalNodes can implement +// to be filterable by the operation that is being run on Terraform. +type EvalNodeOpFilterable interface { + IncludeInOp(walkOperation) bool +} + +// EvalNodeFilterOp returns a filter function that filters nodes that +// include themselves in specific operations. +func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc { + return func(n EvalNode) EvalNode { + include := true + if of, ok := n.(EvalNodeOpFilterable); ok { + include = of.IncludeInOp(op) + } + if include { + return n + } + + return EvalNoop{} + } +} + +// EvalOpFilter is an EvalNode implementation that is a proxy to +// another node but filters based on the operation. +type EvalOpFilter struct { + // Ops is the list of operations to include this node in. + Ops []walkOperation + + // Node is the node to execute + Node EvalNode +} + +// TODO: test +func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) { + return EvalRaw(n.Node, ctx) +} + +// EvalNodeOpFilterable impl. 
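A rough sketch of how the filter helpers above compose (illustrative only: most fields are elided, and walkRefresh and walkPlan are assumed to be walkOperation values defined elsewhere in this package):

// Wrap a node so that it only participates in refresh walks.
node := &EvalOpFilter{
	Ops:  []walkOperation{walkRefresh},
	Node: &EvalRefresh{ /* Addr, Provider, State, Output elided */ },
}

// A walker preparing a different operation can prune the tree up front.
tree := &EvalSequence{Nodes: []EvalNode{node}}
pruned := EvalFilter(tree, EvalNodeFilterOp(walkPlan))
// EvalSequence is filterable, so the op-filtered node is swapped for EvalNoop{}
// here; during a walkRefresh walk it would be kept as-is.
_ = pruned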
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool { + for _, v := range n.Ops { + if v == op { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go new file mode 100644 index 000000000..a63389a91 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_for_each.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// evaluateResourceForEachExpression interprets a "for_each" argument on a resource. +// +// Returns a cty.Value map, and diagnostics if necessary. It will return nil if +// the expression is nil, and is used to distinguish between an unset for_each and an +// empty map +func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) { + forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx) + if !known { + // Attach a diag as we do with count, with the same downsides + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`, + Subject: expr.Range().Ptr(), + }) + } + return forEachMap, diags +} + +// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression +// except that it handles an unknown result by returning an empty map and +// a known = false, rather than by reporting the unknown value as an error +// diagnostic. +func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) { + if expr == nil { + return nil, true, nil + } + + forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + diags = diags.Append(forEachDiags) + if diags.HasErrors() { + return nil, true, diags + } + + switch { + case forEachVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: `The given "for_each" argument value is unsuitable: the given "for_each" argument value is null. 
A map, or set of strings is allowed.`, + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + case !forEachVal.IsKnown(): + return map[string]cty.Value{}, false, diags + } + + if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() || forEachVal.Type().IsTupleType() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()), + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + } + + // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap + // This also covers an empty set (toset([])) + if forEachVal.LengthInt() == 0 { + return map[string]cty.Value{}, true, diags + } + + if forEachVal.Type().IsSetType() { + if forEachVal.Type().ElementType() != cty.String { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid for_each set argument", + Detail: fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()), + Subject: expr.Range().Ptr(), + }) + return nil, true, diags + } + + // A set may contain unknown values that must be + // discovered by checking with IsWhollyKnown (which iterates through the + // structure), while for maps in cty, keys can never be unknown or null, + // thus the earlier IsKnown check suffices for maps + if !forEachVal.IsWhollyKnown() { + return map[string]cty.Value{}, false, diags + } + } + + return forEachVal.AsValueMap(), true, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go new file mode 100644 index 000000000..d6b46a1f2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_if.go @@ -0,0 +1,26 @@ +package terraform + +// EvalIf is an EvalNode that is a conditional. +type EvalIf struct { + If func(EvalContext) (bool, error) + Then EvalNode + Else EvalNode +} + +// TODO: test +func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { + yes, err := n.If(ctx) + if err != nil { + return nil, err + } + + if yes { + return EvalRaw(n.Then, ctx) + } else { + if n.Else != nil { + return EvalRaw(n.Else, ctx) + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go new file mode 100644 index 000000000..25a2aae06 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_import_state.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalImportState is an EvalNode implementation that performs an +// ImportState operation on a provider. This will return the imported +// states but won't modify any actual state. 
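Stepping back to the for_each evaluation completed above: a small, self-contained illustration (not from the vendored source) of which go-cty values pass its checks. Maps and sets of strings are accepted, while lists, tuples, and sets of non-strings are rejected.

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// acceptable mirrors the type checks made by evaluateResourceForEachExpressionKnown.
func acceptable(v cty.Value) bool {
	if !v.CanIterateElements() || v.Type().IsListType() || v.Type().IsTupleType() {
		return false
	}
	if v.Type().IsSetType() && v.Type().ElementType() != cty.String {
		return false
	}
	return true
}

func main() {
	fmt.Println(acceptable(cty.MapVal(map[string]cty.Value{"a": cty.StringVal("x")}))) // true
	fmt.Println(acceptable(cty.SetVal([]cty.Value{cty.StringVal("a")})))               // true
	fmt.Println(acceptable(cty.ListVal([]cty.Value{cty.StringVal("a")})))              // false
	fmt.Println(acceptable(cty.SetVal([]cty.Value{cty.NumberIntVal(1)})))              // false
}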
+type EvalImportState struct { + Addr addrs.ResourceInstance + Provider *providers.Interface + ID string + Output *[]providers.ImportedResource +} + +// TODO: test +func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + provider := *n.Provider + var diags tfdiags.Diagnostics + + { + // Call pre-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreImportState(absAddr, n.ID) + }) + if err != nil { + return nil, err + } + } + + resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: n.Addr.Resource.Type, + ID: n.ID, + }) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + imported := resp.ImportedResources + + for _, obj := range imported { + log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) + } + + if n.Output != nil { + *n.Output = imported + } + + { + // Call post-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostImportState(absAddr, imported) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalImportStateVerify verifies the state after ImportState and +// after the refresh to make sure it is non-nil and valid. +type EvalImportStateVerify struct { + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + state := *n.State + if state.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", + n.Addr.String(), + ), + )) + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go new file mode 100644 index 000000000..5ab6b44f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_lang.go @@ -0,0 +1,61 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// EvalConfigBlock is an EvalNode implementation that takes a raw +// configuration block and evaluates any expressions within it. +// +// ExpandedConfig is populated with the result of expanding any "dynamic" +// blocks in the given body, which can be useful for extracting correct source +// location information for specific attributes in the result. 
+type EvalConfigBlock struct { + Config *hcl.Body + Schema *configschema.Block + SelfAddr addrs.Referenceable + Output *cty.Value + ExpandedConfig *hcl.Body + ContinueOnErr bool +} + +func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { + val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) + if diags.HasErrors() && n.ContinueOnErr { + log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) + return nil, EvalEarlyExitError{} + } + + if n.Output != nil { + *n.Output = val + } + if n.ExpandedConfig != nil { + *n.ExpandedConfig = body + } + + return nil, diags.ErrWithWarnings() +} + +// EvalConfigExpr is an EvalNode implementation that takes a raw configuration +// expression and evaluates it. +type EvalConfigExpr struct { + Expr hcl.Expression + SelfAddr addrs.Referenceable + Output *cty.Value +} + +func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) + + if n.Output != nil { + *n.Output = val + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go new file mode 100644 index 000000000..031019380 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_local.go @@ -0,0 +1,74 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalLocal is an EvalNode implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. +type EvalLocal struct { + Addr addrs.LocalValue + Expr hcl.Expression +} + +func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + // We ignore diags here because any problems we might find will be found + // again in EvaluateExpr below. + refs, _ := lang.ReferencesInExpr(n.Expr) + for _, ref := range refs { + if ref.Subject == n.Addr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referencing local value", + Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), + Subject: ref.SourceRange.ToHCL().Ptr(), + Context: n.Expr.Range().Ptr(), + }) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } + + val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags.Err() + } + + state := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") + } + + state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) + + return nil, nil +} + +// EvalDeleteLocal is an EvalNode implementation that deletes a Local value +// from the state. Locals aren't persisted, but we don't need to evaluate them +// during destroy. 
+type EvalDeleteLocal struct { + Addr addrs.LocalValue +} + +func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { + state := ctx.State() + if state == nil { + return nil, nil + } + + state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go new file mode 100644 index 000000000..f4bc8225c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_noop.go @@ -0,0 +1,8 @@ +package terraform + +// EvalNoop is an EvalNode that does nothing. +type EvalNoop struct{} + +func (EvalNoop) Eval(EvalContext) (interface{}, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go new file mode 100644 index 000000000..9f71e92f6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_output.go @@ -0,0 +1,135 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// EvalDeleteOutput is an EvalNode implementation that deletes an output +// from the state. +type EvalDeleteOutput struct { + Addr addrs.OutputValue +} + +// TODO: test +func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { + state := ctx.State() + if state == nil { + return nil, nil + } + + state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) + return nil, nil +} + +// EvalWriteOutput is an EvalNode implementation that writes the output +// for the given name to the current state. +type EvalWriteOutput struct { + Addr addrs.OutputValue + Sensitive bool + Expr hcl.Expression + // ContinueOnErr allows interpolation to fail during Input + ContinueOnErr bool +} + +// TODO: test +func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + addr := n.Addr.Absolute(ctx.Path()) + + // This has to run before we have a state lock, since evaluation also + // reads the state + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + // We'll handle errors below, after we have loaded the module. + + state := ctx.State() + if state == nil { + return nil, nil + } + + changes := ctx.Changes() // may be nil, if we're not working on a changeset + + // handling the interpolation error + if diags.HasErrors() { + if n.ContinueOnErr || flagWarnOutputErrors { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) + // if we're continuing, make sure the output is included, and + // marked as unknown. If the evaluator was able to find a type + // for the value in spite of the error then we'll use it. + n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) + return nil, EvalEarlyExitError{} + } + return nil, diags.Err() + } + + n.setValue(addr, state, changes, val) + + return nil, nil +} + +func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { + if val.IsKnown() && !val.IsNull() { + // The state itself doesn't represent unknown values, so we null them + // out here and then we'll save the real unknown value in the planned + // changeset below, if we have one on this graph walk. 
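An illustrative go-cty sketch (not part of the vendored source) of what cty.UnknownAsNull does at this point, and therefore why the state copy drops unknowns while the changeset keeps them:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("logs"),
		"arn":    cty.UnknownVal(cty.String), // computed, unknown until apply
	})

	stateVal := cty.UnknownAsNull(val) // replaces unknowns with nulls, deeply
	fmt.Println(stateVal.GetAttr("arn").IsNull())      // true: representable in state
	fmt.Println(stateVal.GetAttr("bucket").AsString()) // "logs": known values survive
}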
+ log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) + stateVal := cty.UnknownAsNull(val) + state.SetOutputValue(addr, stateVal, n.Sensitive) + } else { + log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) + state.RemoveOutputValue(addr) + } + + // If we also have an active changeset then we'll replicate the value in + // there. This is used in preference to the state where present, since it + // *is* able to represent unknowns, while the state cannot. + if changes != nil { + // For the moment we are not properly tracking changes to output + // values, and just marking them always as "Create" or "Destroy" + // actions. A future release will rework the output lifecycle so we + // can track their changes properly, in a similar way to how we work + // with resource instances. + + var change *plans.OutputChange + if !val.IsNull() { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + Action: plans.Create, + Before: cty.NullVal(cty.DynamicPseudoType), + After: val, + }, + } + } else { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + // This is just a weird placeholder delete action since + // we don't have an actual prior value to indicate. + // FIXME: Generate real planned changes for output values + // that include the old values. + Action: plans.Delete, + Before: cty.NullVal(cty.DynamicPseudoType), + After: cty.NullVal(cty.DynamicPseudoType), + }, + } + } + + cs, err := change.Encode() + if err != nil { + // Should never happen, since we just constructed this right above + panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) + } + log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) + changes.RemoveOutputChange(addr) // remove any existing planned change, if present + changes.AppendOutputChange(cs) // add the new planned change + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go new file mode 100644 index 000000000..7440cff7a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provider.go @@ -0,0 +1,147 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body { + var configBody hcl.Body + if config != nil { + configBody = config.Config + } + + var inputBody hcl.Body + inputConfig := ctx.ProviderInput(addr) + if len(inputConfig) > 0 { + inputBody = configs.SynthBody("", inputConfig) + } + + switch { + case configBody != nil && inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr) + // Note that the inputBody is the _base_ here, because configs.MergeBodies + // expects the base have all of the required fields, while these are + // forced to be optional for the override. The input process should + // guarantee that we have a value for each of the required arguments and + // that in practice the sets of attributes in each body will be + // disjoint. 
+ return configs.MergeBodies(inputBody, configBody) + case configBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) + return configBody + case inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) + return inputBody + default: + log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) + return hcl.EmptyBody() + } +} + +// EvalConfigProvider is an EvalNode implementation that configures +// a provider that is already initialized and retrieved. +type EvalConfigProvider struct { + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider +} + +func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil { + return nil, fmt.Errorf("EvalConfigProvider Provider is nil") + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + config := n.Config + + configBody := buildProviderConfig(ctx, n.Addr, config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configDiags := ctx.ConfigureProvider(n.Addr, configVal) + configDiags = configDiags.InConfigBody(configBody) + + return nil, configDiags.ErrWithWarnings() +} + +// EvalInitProvider is an EvalNode implementation that initializes a provider +// and returns nothing. The provider can be retrieved again with the +// EvalGetProvider node. +type EvalInitProvider struct { + TypeName string + Addr addrs.ProviderConfig +} + +func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvider(n.TypeName, n.Addr) +} + +// EvalCloseProvider is an EvalNode implementation that closes provider +// connections that aren't needed anymore. +type EvalCloseProvider struct { + Addr addrs.ProviderConfig +} + +func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvider(n.Addr) + return nil, nil +} + +// EvalGetProvider is an EvalNode implementation that retrieves an already +// initialized provider instance for the given name. +// +// Unlike most eval nodes, this takes an _absolute_ provider configuration, +// because providers can be passed into and inherited between modules. +// Resource nodes must therefore know the absolute path of the provider they +// will use, which is usually accomplished by implementing +// interface GraphNodeProviderConsumer. +type EvalGetProvider struct { + Addr addrs.AbsProviderConfig + Output *providers.Interface + + // If non-nil, Schema will be updated after eval to refer to the + // schema of the provider. 
+ Schema **ProviderSchema +} + +func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { + if n.Addr.ProviderConfig.Type == "" { + // Should never happen + panic("EvalGetProvider used with uninitialized provider configuration address") + } + + result := ctx.Provider(n.Addr) + if result == nil { + return nil, fmt.Errorf("provider %s not initialized", n.Addr) + } + + if n.Output != nil { + *n.Output = result + } + + if n.Schema != nil { + *n.Schema = ctx.ProviderSchema(n.Addr) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go new file mode 100644 index 000000000..405ce9d0b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_provisioner.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner +// and returns nothing. The provisioner can be retrieved again with the +// EvalGetProvisioner node. +type EvalInitProvisioner struct { + Name string +} + +func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvisioner(n.Name) +} + +// EvalCloseProvisioner is an EvalNode implementation that closes provisioner +// connections that aren't needed anymore. +type EvalCloseProvisioner struct { + Name string +} + +func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvisioner(n.Name) + return nil, nil +} + +// EvalGetProvisioner is an EvalNode implementation that retrieves an already +// initialized provisioner instance for the given name. +type EvalGetProvisioner struct { + Name string + Output *provisioners.Interface + Schema **configschema.Block +} + +func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provisioner(n.Name) + if result == nil { + return nil, fmt.Errorf("provisioner %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + if n.Schema != nil { + *n.Schema = ctx.ProvisionerSchema(n.Name) + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go new file mode 100644 index 000000000..0b734b793 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_read_data.go @@ -0,0 +1,395 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalReadData is an EvalNode implementation that deals with the main part +// of the data resource lifecycle: either actually reading from the data source +// or generating a plan to do so. 
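Many of these nodes hand results to later nodes through caller-owned pointers rather than return values, which is why fields such as Schema and ProviderSchema carry an extra level of indirection. A rough sketch of that convention (illustrative only; providerConfigAddr and instanceAddr are hypothetical and most fields are elided):

var provider providers.Interface
var providerSchema *ProviderSchema

seq := &EvalSequence{
	Nodes: []EvalNode{
		// EvalGetProvider fills the two slots declared above...
		&EvalGetProvider{Addr: providerConfigAddr, Output: &provider, Schema: &providerSchema},
		// ...and a later node reads them through the same pointers, seeing
		// whatever the earlier node wrote by the time the walk reaches it.
		&EvalReadData{Addr: instanceAddr, Provider: &provider, ProviderSchema: &providerSchema /* ... */},
	},
}
_ = seq // handed to the graph walker elsewhere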
+type EvalReadData struct { + Addr addrs.ResourceInstance + Config *configs.Resource + Dependencies []addrs.Referenceable + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + + // Planned is set when dealing with data resources that were deferred to + // the apply walk, to let us see what was planned. If this is set, the + // evaluation of the config is required to produce a wholly-known + // configuration which is consistent with the partial object included + // in this planned change. + Planned **plans.ResourceInstanceChange + + // ForcePlanRead, if true, overrides the usual behavior of immediately + // reading from the data source where possible, instead forcing us to + // _always_ generate a plan. This is used during the plan walk, since we + // mustn't actually apply anything there. (The resulting state doesn't + // get persisted) + ForcePlanRead bool + + // The result from this EvalNode has a few different possibilities + // depending on the input: + // - If Planned is nil then we assume we're aiming to _produce_ the plan, + // and so the following two outcomes are possible: + // - OutputChange.Action is plans.NoOp and OutputState is the complete + // result of reading from the data source. This is the easy path. + // - OutputChange.Action is plans.Read and OutputState is a planned + // object placeholder (states.ObjectPlanned). In this case, the + // returned change must be recorded in the overral changeset and + // eventually passed to another instance of this struct during the + // apply walk. + // - If Planned is non-nil then we assume we're aiming to complete a + // planned read from an earlier plan walk. In this case the only possible + // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp + // change and put the complete resulting state in OutputState, ready to + // be saved in the overall state and used for expression evaluation. + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputConfigValue *cty.Value + OutputState **states.ResourceInstanceObject +} + +func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadData: working on %s", absAddr) + + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema not available for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + var change *plans.ResourceInstanceChange + var configVal cty.Value + + // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and + // EvalReadDataApply did, but it seems like we should handle that via a + // separate mechanism since it boils down to just deleting the object from + // the state... and we do that on every plan anyway, forcing the data + // resource to re-read. + + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type) + } + + // We'll always start by evaluating the configuration. What we do after + // that will depend on the evaluation result along with what other inputs + // we were given. 
+ objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time + + forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + keyData := EvalDataForInstanceKey(n.Addr.Key, forEach) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) + + // If our configuration contains any unknown values then we must defer the + // read to the apply phase by producing a "Read" change for this resource, + // and a placeholder value for it in the state. + if n.ForcePlanRead || !configVal.IsWhollyKnown() { + // If the configuration is still unknown when we're applying a planned + // change then that indicates a bug in Terraform, since we should have + // everything resolved by now. + if n.Planned != nil && *n.Planned != nil { + return nil, fmt.Errorf( + "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", + absAddr, + ) + } + if n.ForcePlanRead { + log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr) + } else { + log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr) + } + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.Read, + Before: priorVal, + After: proposedNewVal, + }, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectPlanned, // because the partial value in the plan must be used for now + Dependencies: n.Dependencies, + } + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() + } + + if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read { + // If any other action gets in here then that's always a bug; this + // EvalNode only deals with reading. + return nil, fmt.Errorf( + "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", + (*n.Planned).Action, absAddr, + ) + } + + log.Printf("[TRACE] Re-validating config for %s", absAddr) + validateResp := provider.ValidateDataSourceConfig( + providers.ValidateDataSourceConfigRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err() + } + + // If we get down here then our configuration is complete and we're read + // to actually call the provider to read the data. 
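The deferral decision above hinges on the difference between IsKnown and IsWhollyKnown; a small go-cty illustration (not from the vendored source):

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	cfg := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"id":   cty.UnknownVal(cty.String), // depends on a resource not yet created
	})

	fmt.Println(cfg.IsKnown())       // true: the object itself is a known value
	fmt.Println(cfg.IsWhollyKnown()) // false: a nested unknown forces the read
	//                                  to be deferred to the apply phase
}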
+ log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr) + + err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) + }) + if err != nil { + return nil, err + } + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + newVal := resp.State + if newVal == cty.NilVal { + // This can happen with incompletely-configured mocks. We'll allow it + // and treat it as an alias for a properly-typed null value. + newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + } + if !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. + // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. + newVal = cty.UnknownAsNull(newVal) + } + + // Since we've completed the read, we actually have no change to make, but + // we'll produce a NoOp one anyway to preserve the usual flow of the + // plan phase and allow it to produce a complete plan. 
+ change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.NoOp, + Before: newVal, + After: newVal, + }, + } + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectReady, // because we completed the read from the provider + Dependencies: n.Dependencies, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } + if n.OutputState != nil { + *n.OutputState = state + } + + return nil, diags.ErrWithWarnings() +} + +// EvalReadDataApply is an EvalNode implementation that executes a data +// resource's ReadDataApply method to read data from the data source. +type EvalReadDataApply struct { + Addr addrs.ResourceInstance + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + Config *configs.Resource + Change **plans.ResourceInstanceChange + StateReferences []addrs.Referenceable +} + +func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + change := *n.Change + providerSchema := *n.ProviderSchema + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics + + // If the diff is for *destroying* this resource then we'll + // just drop its state and move on, since data resources don't + // support an actual "destroy" action. + if change != nil && change.Action == plans.Delete { + if n.Output != nil { + *n.Output = nil + } + return nil, nil + } + + // For the purpose of external hooks we present a data apply as a + // "Refresh" rather than an "Apply" because creating a data source + // is presented to users/callers as a "read" operation. + err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) + }) + if err != nil { + return nil, err + } + + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: change.After, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) + } + + newVal := resp.State + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s. 
The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + Dependencies: n.StateReferences, + } + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go new file mode 100644 index 000000000..6a834445c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_refresh.go @@ -0,0 +1,106 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalRefresh is an EvalNode implementation that does a refresh for +// a resource. +type EvalRefresh struct { + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + Output **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics + + // If we have no state, we don't do any refreshing + if state == nil { + log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) + return nil, diags.ErrWithWarnings() + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + // Call pre-refresh hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreRefresh(absAddr, states.CurrentGen, state.Value) + }) + if err != nil { + return nil, diags.ErrWithWarnings() + } + + // Refresh! + priorVal := state.Value + req := providers.ReadResourceRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: priorVal, + Private: state.Private, + } + + provider := *n.Provider + resp := provider.ReadResource(req) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + if resp.NewState == cty.NilVal { + // This ought not to happen in real cases since it's not possible to + // send NilVal over the plugin RPC channel, but it can come up in + // tests due to sloppy mocking. 
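These nodes repeatedly validate provider responses by calling TestConformance against the schema's implied type. A self-contained illustration (not from the vendored source) of the kind of mismatch that check reports:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	want := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"port": cty.Number,
	})
	got := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("web"),
		"port": cty.StringVal("8080"), // wrong type: string instead of number
	})

	for _, err := range got.Type().TestConformance(want) {
		fmt.Println(err) // reports the mismatched "port" attribute
	}
}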
+ panic("new state is cty.NilVal") + } + + for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + newState := state.DeepCopy() + newState.Value = resp.NewState + newState.Private = resp.Private + + // Call post-refresh hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = newState + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go new file mode 100644 index 000000000..7d6bb6603 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_sequence.go @@ -0,0 +1,42 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalSequence is an EvalNode that evaluates in sequence. +type EvalSequence struct { + Nodes []EvalNode +} + +func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + for _, n := range n.Nodes { + if n == nil { + continue + } + + if _, err := EvalRaw(n, ctx); err != nil { + if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { + // In this path we abort early, losing any non-error + // diagnostics we saw earlier. + return nil, err + } + diags = diags.Append(err) + if diags.HasErrors() { + // Halt if we get some errors, but warnings are okay. + break + } + } + } + + return nil, diags.ErrWithWarnings() +} + +// EvalNodeFilterable impl. +func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { + for i, node := range n.Nodes { + n.Nodes[i] = fn(node) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go new file mode 100644 index 000000000..70a72bbdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state.go @@ -0,0 +1,475 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalReadState is an EvalNode implementation that reads the +// current object for a specific instance in the state. +type EvalReadState struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. 
+ Output **states.ResourceInstanceObject +} + +func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadState used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadState used with no ProviderSchema object") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr) + + src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen) + if src == nil { + // Presumably we only have deposed objects, then. + log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr) + return nil, nil + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = obj + } + return obj, nil +} + +// EvalReadStateDeposed is an EvalNode implementation that reads the +// deposed InstanceState for a specific resource out of the state +type EvalReadStateDeposed struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // Key identifies which deposed object we will read. + Key states.DeposedKey + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. + Provider *providers.Interface + + // Output will be written with a pointer to the retrieved object. + Output **states.ResourceInstanceObject +} + +func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadStateDeposed used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadStateDeposed used with no ProviderSchema object") + } + + key := n.Key + if key == states.NotDeposed { + return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") + } + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) + + src := ctx.State().ResourceInstanceObject(absAddr, key) + if src == nil { + // Presumably we only have deposed objects, then. 
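Decoding stored state requires the schema's implied type, much as a go-cty JSON round trip does. An illustrative sketch (not from the vendored source, and simplified relative to the real state encoding) of why the type must be supplied on the way back in:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"tags": cty.Map(cty.String),
	})
	obj := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-123"),
		"tags": cty.MapVal(map[string]cty.Value{"env": cty.StringVal("dev")}),
	})

	buf, _ := ctyjson.Marshal(obj, ty)    // roughly what Encode persists
	back, _ := ctyjson.Unmarshal(buf, ty) // Decode needs the same implied type
	fmt.Println(back.RawEquals(obj))      // true
}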
+ log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) + return nil, nil + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + if n.Output != nil { + *n.Output = obj + } + return obj, nil +} + +// EvalRequireState is an EvalNode implementation that exits early if the given +// object is null. +type EvalRequireState struct { + State **states.ResourceInstanceObject +} + +func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + return nil, EvalEarlyExitError{} + } + + state := *n.State + if state == nil || state.Value.IsNull() { + return nil, EvalEarlyExitError{} + } + + return nil, nil +} + +// EvalUpdateStateHook is an EvalNode implementation that calls the +// PostStateUpdate hook with the current state. +type EvalUpdateStateHook struct{} + +func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { + // In principle we could grab the lock here just long enough to take a + // deep copy and then pass that to our hooks below, but we'll instead + // hold the hook for the duration to avoid the potential confusing + // situation of us racing to call PostStateUpdate concurrently with + // different state snapshots. + stateSync := ctx.State() + state := stateSync.Lock().DeepCopy() + defer stateSync.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +// EvalWriteState is an EvalNode implementation that saves the given object +// as the current object for the selected resource instance. +type EvalWriteState struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig +} + +func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absense of an object as we'd see during destroy. + panic("EvalWriteState used with no ResourceInstanceObject") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + if n.ProviderAddr.ProviderConfig.Type == "" { + return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr) + } + + obj := *n.State + if obj == nil || obj.Value.IsNull() { + // No need to encode anything: we'll just write it directly. 
+ state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteState used with pointer to nil ProviderSchema object") + } + + if obj != nil { + log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) + } else { + log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. + return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) + return nil, nil +} + +// EvalWriteStateDeposed is an EvalNode implementation that writes +// an InstanceState out to the Deposed list of a resource in the state. +type EvalWriteStateDeposed struct { + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // Key indicates which deposed object to write to. + Key states.DeposedKey + + // State is the object state to save. + State **states.ResourceInstanceObject + + // ProviderSchema is the schema for the provider given in ProviderAddr. + ProviderSchema **ProviderSchema + + // ProviderAddr is the address of the provider configuration that + // produced the given object. + ProviderAddr addrs.AbsProviderConfig +} + +func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + // Note that a pointer _to_ nil is valid here, indicating the total + // absense of an object as we'd see during destroy. + panic("EvalWriteStateDeposed used with no ResourceInstanceObject") + } + + absAddr := n.Addr.Absolute(ctx.Path()) + key := n.Key + state := ctx.State() + + if key == states.NotDeposed { + // should never happen + return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr) + } + + obj := *n.State + if obj == nil { + // No need to encode anything: we'll just write it directly. + state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteStateDeposed used with no ProviderSchema object") + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. 
+ return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key) + state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr) + return nil, nil +} + +// EvalDeposeState is an EvalNode implementation that moves the current object +// for the given instance to instead be a deposed object, leaving the instance +// with no current object. +// This is used at the beginning of a create-before-destroy replace action so +// that the create can create while preserving the old state of the +// to-be-destroyed object. +type EvalDeposeState struct { + Addr addrs.ResourceInstance + + // ForceKey, if a value other than states.NotDeposed, will be used as the + // key for the newly-created deposed object that results from this action. + // If set to states.NotDeposed (the zero value), a new unique key will be + // allocated. + ForceKey states.DeposedKey + + // OutputKey, if non-nil, will be written with the deposed object key that + // was generated for the object. This can then be passed to + // EvalUndeposeState.Key so it knows which deposed instance to forget. + OutputKey *states.DeposedKey +} + +// TODO: test +func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + var key states.DeposedKey + if n.ForceKey == states.NotDeposed { + key = state.DeposeResourceInstanceObject(absAddr) + } else { + key = n.ForceKey + state.DeposeResourceInstanceObjectForceKey(absAddr, key) + } + log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key) + + if n.OutputKey != nil { + *n.OutputKey = key + } + + return nil, nil +} + +// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will +// restore a particular deposed object of the specified resource instance +// to be the "current" object if and only if the instance doesn't currently +// have a current object. +// +// This is intended for use when the create leg of a create before destroy +// fails with no partial new object: if we didn't take any action, the user +// would be left in the unfortunate situation of having no current object +// and the previously-workign object now deposed. This EvalNode causes a +// better outcome by restoring things to how they were before the replace +// operation began. +// +// The create operation may have produced a partial result even though it +// failed and it's important that we don't "forget" that state, so in that +// situation the prior object remains deposed and the partial new object +// remains the current object, allowing the situation to hopefully be +// improved in a subsequent run. +type EvalMaybeRestoreDeposedObject struct { + Addr addrs.ResourceInstance + + // Key is a pointer to the deposed object key that should be forgotten + // from the state, which must be non-nil. 
+ Key *states.DeposedKey +} + +// TODO: test +func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + dk := *n.Key + state := ctx.State() + + restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk) + if restored { + log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk) + } else { + log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk) + } + + return nil, nil +} + +// EvalWriteResourceState is an EvalNode implementation that ensures that +// a suitable resource-level state record is present in the state, if that's +// required for the "each mode" of that resource. +// +// This is important primarily for the situation where count = 0, since this +// eval is the only change we get to set the resource "each mode" to list +// in that case, allowing expression evaluation to see it as a zero-element +// list rather than as not set at all. +type EvalWriteResourceState struct { + Addr addrs.Resource + Config *configs.Resource + ProviderAddr addrs.AbsProviderConfig +} + +// TODO: test +func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + eachMode := states.NoEach + if count >= 0 { // -1 signals "count not set" + eachMode = states.EachList + } + + forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + diags = diags.Append(forEachDiags) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + + if forEach != nil { + eachMode = states.EachMap + } + + // This method takes care of all of the business logic of updating this + // while ensuring that any existing instances are preserved, etc. + state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr) + + return nil, nil +} + +// EvalForgetResourceState is an EvalNode implementation that prunes out an +// empty resource-level state for a given resource address, or produces an +// error if it isn't empty after all. +// +// This should be the last action taken for a resource that has been removed +// from the configuration altogether, to clean up the leftover husk of the +// resource in the state after other EvalNodes have destroyed and removed +// all of the instances and instance objects beneath it. +type EvalForgetResourceState struct { + Addr addrs.Resource +} + +func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + pruned := state.RemoveResourceIfEmpty(absAddr) + if !pruned { + // If this produces an error, it indicates a bug elsewhere in Terraform + // -- probably missing graph nodes, graph edges, or + // incorrectly-implemented evaluation steps. 
+ return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) + } + log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go new file mode 100644 index 000000000..27d5f212e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_state_upgrade.go @@ -0,0 +1,106 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// UpgradeResourceState will, if necessary, run the provider-defined upgrade +// logic against the given state object to make it compliant with the +// current schema version. This is a no-op if the given state object is +// already at the latest version. +// +// If any errors occur during upgrade, error diagnostics are returned. In that +// case it is not safe to proceed with using the original state object. +func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + // We only do state upgrading for managed resources. + return src, nil + } + + stateIsFlatmap := len(src.AttrsJSON) == 0 + + providerType := addr.Resource.Resource.DefaultProviderConfig().Type + if src.SchemaVersion > currentVersion { + log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance managed by newer provider version", + // This is not a very good error message, but we don't retain enough + // information in state to give good feedback on what provider + // version might be required here. :( + fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), + )) + return nil, diags + } + + // If we get down here then we need to upgrade the state, with the + // provider's help. + // If this state was originally created by a version of Terraform prior to + // v0.12, this also includes translating from legacy flatmap to new-style + // representation, since only the provider has enough information to + // understand a flatmap built against an older schema. 
+ if src.SchemaVersion != currentVersion { + log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + } else { + log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) + } + + req := providers.UpgradeResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + + // TODO: The internal schema version representations are all using + // uint64 instead of int64, but unsigned integers aren't friendly + // to all protobuf target languages so in practice we use int64 + // on the wire. In future we will change all of our internal + // representations to int64 too. + Version: int64(src.SchemaVersion), + } + + if stateIsFlatmap { + req.RawStateFlatmap = src.AttrsFlat + } else { + req.RawStateJSON = src.AttrsJSON + } + + resp := provider.UpgradeResourceState(req) + diags := resp.Diagnostics + if diags.HasErrors() { + return nil, diags + } + + // After upgrading, the new value must conform to the current schema. When + // going over RPC this is actually already ensured by the + // marshaling/unmarshaling of the new value, but we'll check it here + // anyway for robustness, e.g. for in-process providers. + newValue := resp.UpgradedState + if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource state upgrade", + fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), + )) + } + return nil, diags + } + + new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) + if err != nil { + // We already checked for type conformance above, so getting into this + // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to encode result of resource state upgrade", + fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), + )) + } + return new, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go new file mode 100644 index 000000000..a4f28bd90 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate.go @@ -0,0 +1,588 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// EvalValidateCount is an EvalNode implementation that validates +// the count of a resource. 
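+//
+// Illustrative sketch only (cfg here stands for a hypothetical *configs.Resource
+// value, not a name defined in this package): a validate-time eval tree could
+// include this node alongside others, for example
+//
+//	&EvalSequence{Nodes: []EvalNode{
+//		&EvalValidateCount{Resource: cfg},
+//	}}
+//
+// with the sequence then executed via EvalRaw.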
+type EvalValidateCount struct { + Resource *configs.Resource +} + +// TODO: test +func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + var count int + var err error + + val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + goto RETURN + } + if val.IsNull() || !val.IsKnown() { + goto RETURN + } + + err = gocty.FromCtyValue(val, &count) + if err != nil { + // The EvaluateExpr call above already guaranteed us a number value, + // so if we end up here then we have something that is out of range + // for an int, and the error message will include a description of + // the valid range. + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), + Subject: n.Resource.Count.Range().Ptr(), + }) + } else if count < 0 { + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), + Subject: n.Resource.Count.Range().Ptr(), + }) + } + +RETURN: + return nil, diags.NonFatalErr() +} + +// EvalValidateProvider is an EvalNode implementation that validates +// a provider configuration. +type EvalValidateProvider struct { + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider +} + +func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + provider := *n.Provider + + configBody := buildProviderConfig(ctx, n.Addr, n.Config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + if configSchema == nil { + // Should never happen in real code, but often comes up in tests where + // mock schemas are being used that tend to be incomplete. + log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) + configSchema = &configschema.Block{} + } + + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + req := providers.PrepareProviderConfigRequest{ + Config: configVal, + } + + validateResp := provider.PrepareProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics) + + return nil, diags.NonFatalErr() +} + +// EvalValidateProvisioner is an EvalNode implementation that validates +// the configuration of a provisioner belonging to a resource. The provisioner +// config is expected to contain the merged connection configurations. 
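+//
+// The merged connection block is validated against
+// connectionBlockSupersetSchema (defined later in this file), which accepts
+// the superset of arguments understood by the ssh and winrm communicators, so
+// only arguments that are invalid for every connection type are rejected here.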
+type EvalValidateProvisioner struct { + ResourceAddr addrs.Resource + Provisioner *provisioners.Interface + Schema **configschema.Block + Config *configs.Provisioner + ResourceHasCount bool + ResourceHasForEach bool +} + +func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { + provisioner := *n.Provisioner + config := *n.Config + schema := *n.Schema + + var diags tfdiags.Diagnostics + + { + // Validate the provisioner's own config first + + configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return nil, fmt.Errorf("EvaluateBlock returned nil value") + } + + req := provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) + } + + { + // Now validate the connection config, which contains the merged bodies + // of the resource and provisioner connection blocks. + connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) + diags = diags.Append(connDiags) + } + + return nil, diags.NonFatalErr() +} + +func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. + + var diags tfdiags.Diagnostics + + if config == nil || config.Config == nil { + // No block to validate + return diags + } + + // We evaluate here just by evaluating the block and returning any + // diagnostics we get, since evaluation alone is enough to check for + // extraneous arguments and incorrectly-typed arguments. + _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) + diags = diags.Append(configDiags) + + return diags +} + +func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + keyData := EvalDataForNoInstanceKey + selfAddr := n.ResourceAddr.Instance(addrs.NoKey) + + if n.ResourceHasCount { + // For a resource that has count, we allow count.index but don't + // know at this stage what it will return. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // "self" can't point to an unknown key, but we'll force it to be + // key 0 here, which should return an unknown value of the + // expected type since none of these elements are known at this + // point anyway. + selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) + } else if n.ResourceHasForEach { + // For a resource that has for_each, we allow each.value and each.key + // but don't know at this stage what it will return. + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.DynamicVal, + } + + // "self" can't point to an unknown key, but we'll force it to be + // key "" here, which should return an unknown value of the + // expected type since none of these elements are known at + // this point anyway. 
+ selfAddr = n.ResourceAddr.Instance(addrs.StringKey("")) + } + + return ctx.EvaluateBlock(body, schema, selfAddr, keyData) +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. Once that is done, we can remove +// this and use a type-specific schema from the communicator to validate +// exactly what is expected for a given connection type. +var connectionBlockSupersetSchema = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // NOTE: "type" is not included here because it's treated special + // by the config loader and stored away in a separate field. + + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.String, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + + // For type=ssh only (enforced in ssh communicator) + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + "bastion_certificate": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: cty.Bool, + Optional: true, + }, + }, +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. It's exported only for use in the +// configs/configupgrade package and should not be used from anywhere else. +// The caller may not modify any part of the returned schema data structure. +func ConnectionBlockSupersetSchema() *configschema.Block { + return connectionBlockSupersetSchema +} + +// EvalValidateResource is an EvalNode implementation that validates +// the configuration of a resource. +type EvalValidateResource struct { + Addr addrs.Resource + Provider *providers.Interface + ProviderSchema **ProviderSchema + Config *configs.Resource + + // IgnoreWarnings means that warnings will not be passed through. This allows + // "just-in-time" passes of validation to continue execution through warnings. + IgnoreWarnings bool + + // ConfigVal, if non-nil, will be updated with the value resulting from + // evaluating the given configuration body. 
Since validation is performed + // very early, this value is likely to contain lots of unknown values, + // but its type will conform to the schema of the resource type associated + // with the resource instance being validated. + ConfigVal *cty.Value +} + +func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + cfg := *n.Config + schema := *n.ProviderSchema + mode := cfg.Mode + + keyData := EvalDataForNoInstanceKey + if n.Config.Count != nil { + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := n.validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + } + + if n.Config.ForEach != nil { + keyData = InstanceKeyEvalData{ + EachKey: cty.UnknownVal(cty.String), + EachValue: cty.UnknownVal(cty.DynamicPseudoType), + } + + // Evaluate the for_each expression here so we can expose the diagnostics + forEachDiags := n.validateForEach(ctx, n.Config.ForEach) + diags = diags.Append(forEachDiags) + } + + for _, traversal := range n.Config.DependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if !refDiags.HasErrors() && len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. 
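+	// (Managed resources are checked with the provider's
+	// ValidateResourceTypeConfig call and data resources with
+	// ValidateDataSourceConfig, as the two cases below show.)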
+ switch mode { + case addrs.ManagedResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range cfg.Managed.IgnoreChanges { + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + } + } + + req := providers.ValidateResourceTypeConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateResourceTypeConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + + if n.ConfigVal != nil { + *n.ConfigVal = configVal + } + + case addrs.DataResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + req := providers.ValidateDataSourceConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateDataSourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + } + + if n.IgnoreWarnings { + // If we _only_ have warnings then we'll return nil. + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + return nil, nil + } else { + // We'll return an error if there are any diagnostics at all, even if + // some of them are warnings. + return nil, diags.NonFatalErr() + } +} + +func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { + if expr == nil { + return nil + } + + var diags tfdiags.Diagnostics + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return diags + } + + if countVal.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return diags + } + + var err error + countVal, err = convert.Convert(countVal, cty.Number) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags + } + + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk. + if !countVal.IsKnown() { + return diags + } + + // If we _do_ know the value, then we can do a few more checks here. + var count int + err = gocty.FromCtyValue(countVal, &count) + if err != nil { + // Isn't a whole number, etc. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags + } + + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, + Subject: expr.Range().Ptr(), + }) + return diags + } + + return diags +} + +func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) { + _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx) + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk + if !known { + return diags + } + + if forEachDiags.HasErrors() { + diags = diags.Append(forEachDiags) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go new file mode 100644 index 000000000..c9cc0e6da --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_validate_selfref.go @@ -0,0 +1,67 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that +// expressions within a particular referencable block do not reference that +// same block. +type EvalValidateSelfRef struct { + Addr addrs.Referenceable + Config hcl.Body + ProviderSchema **ProviderSchema +} + +func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + addr := n.Addr + + addrStrs := make([]string, 0, 1) + addrStrs = append(addrStrs, addr.String()) + switch tAddr := addr.(type) { + case addrs.ResourceInstance: + // A resource instance may not refer to its containing resource either. 
+ addrStrs = append(addrStrs, tAddr.ContainingResource().String()) + } + + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) + } + + providerSchema := *n.ProviderSchema + var schema *configschema.Block + switch tAddr := addr.(type) { + case addrs.Resource: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr) + case addrs.ResourceInstance: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) + } + + if schema == nil { + return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) + } + + refs, _ := lang.ReferencesInBlock(n.Config, schema) + for _, ref := range refs { + for _, addrStr := range addrStrs { + if ref.Subject.String() == addrStr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referential block", + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + } + + return nil, diags.NonFatalErr() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go new file mode 100644 index 000000000..79f44b3fe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/eval_variable.go @@ -0,0 +1,96 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// EvalSetModuleCallArguments is an EvalNode implementation that sets values +// for arguments of a child module call, for later retrieval during +// expression evaluation. +type EvalSetModuleCallArguments struct { + Module addrs.ModuleCallInstance + Values map[string]cty.Value +} + +// TODO: test +func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { + ctx.SetModuleCallArguments(n.Module, n.Values) + return nil, nil +} + +// EvalModuleCallArgument is an EvalNode implementation that produces the value +// for a particular variable as will be used by a child module instance. +// +// The result is written into the map given in Values, with its key +// set to the local name of the variable, disregarding the module instance +// address. Any existing values in that map are deleted first. This weird +// interface is a result of trying to be convenient for use with +// EvalContext.SetModuleCallArguments, which expects a map to merge in with +// any existing arguments. +type EvalModuleCallArgument struct { + Addr addrs.InputVariable + Config *configs.Variable + Expr hcl.Expression + + // If this flag is set, any diagnostics are discarded and this operation + // will always succeed, though may produce an unknown value in the + // event of an error. + IgnoreDiagnostics bool + + Values map[string]cty.Value +} + +func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { + // Clear out the existing mapping + for k := range n.Values { + delete(n.Values, k) + } + + wantType := n.Config.Type + name := n.Addr.Name + expr := n.Expr + + if expr == nil { + // Should never happen, but we'll bail out early here rather than + // crash in case it does. 
We set no value at all in this case, + // making a subsequent call to EvalContext.SetModuleCallArguments + // a no-op. + log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) + return nil, nil + } + + val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + + // We intentionally passed DynamicPseudoType to EvaluateExpr above because + // now we can do our own local type conversion and produce an error message + // with better context if it fails. + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for module argument", + Detail: fmt.Sprintf( + "The given value is not suitable for child module variable %q defined at %s: %s.", + name, n.Config.DeclRange.String(), convErr, + ), + Subject: expr.Range().Ptr(), + }) + // We'll return a placeholder unknown value to avoid producing + // redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + n.Values[name] = val + if n.IgnoreDiagnostics { + return nil, nil + } + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go new file mode 100644 index 000000000..d4a8d3cf7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaltree_provider.go @@ -0,0 +1,88 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" +) + +// ProviderEvalTree returns the evaluation tree for initializing and +// configuring providers. +func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { + var provider providers.Interface + + addr := n.Addr + relAddr := addr.ProviderConfig + + seq := make([]EvalNode, 0, 5) + seq = append(seq, &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + }) + + // Input stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + }, + }, + }) + + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkValidate}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + &EvalValidateProvider{ + Addr: relAddr, + Provider: &provider, + Config: config, + }, + }, + }, + }) + + // Apply stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: addr, + Output: &provider, + }, + }, + }, + }) + + // We configure on everything but validate, since validate may + // not have access to all the variables. + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalConfigProvider{ + Addr: relAddr, + Provider: &provider, + Config: config, + }, + }, + }, + }) + + return &EvalSequence{Nodes: seq} +} + +// CloseProviderEvalTree returns the evaluation tree for closing +// provider connections that aren't needed anymore. 
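+//
+// (Illustrative note: in a full graph walk, ProviderEvalTree above is
+// typically evaluated by the provider's own graph node, while this close tree
+// is evaluated by a separate node once no remaining resource nodes depend on
+// the provider.)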
+func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { + return &EvalCloseProvider{Addr: addr.ProviderConfig} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go new file mode 100644 index 000000000..2d3eabd48 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate.go @@ -0,0 +1,838 @@ +package terraform + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/agext/levenshtein" + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Evaluator provides the necessary contextual data for evaluating expressions +// for a particular walk operation. +type Evaluator struct { + // Operation defines what type of operation this evaluator is being used + // for. + Operation walkOperation + + // Meta is contextual metadata about the current operation. + Meta *ContextMeta + + // Config is the root node in the configuration tree. + Config *configs.Config + + // VariableValues is a map from variable names to their associated values, + // within the module indicated by ModulePath. VariableValues is modified + // concurrently, and so it must be accessed only while holding + // VariableValuesLock. + // + // The first map level is string representations of addr.ModuleInstance + // values, while the second level is variable names. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + // Schemas is a repository of all of the schemas we should need to + // evaluate expressions. This must be constructed by the caller to + // include schemas for all of the providers, resource types, data sources + // and provisioners used by the given configuration and state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // State is the current state, embedded in a wrapper that ensures that + // it can be safely accessed and modified concurrently. + State *states.SyncState + + // Changes is the set of proposed changes, embedded in a wrapper that + // ensures they can be safely accessed and modified concurrently. + Changes *plans.ChangesSync +} + +// Scope creates an evaluation scope for the given module path and optional +// resource. +// +// If the "self" argument is nil then the "self" object is not available +// in evaluated expressions. Otherwise, it behaves as an alias for the given +// address. +func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { + return &lang.Scope{ + Data: data, + SelfAddr: self, + PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, + BaseDir: ".", // Always current working directory for now. + } +} + +// evaluationStateData is an implementation of lang.Data that resolves +// references primarily (but not exclusively) using information from a State. +type evaluationStateData struct { + Evaluator *Evaluator + + // ModulePath is the path through the dynamic module tree to the module + // that references will be resolved relative to. 
+ ModulePath addrs.ModuleInstance + + // InstanceKeyData describes the values, if any, that are accessible due + // to repetition of a containing object using "count" or "for_each" + // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, + // since the user specifies in that case which variable name to locally + // shadow.) + InstanceKeyData InstanceKeyEvalData + + // Operation records the type of walk the evaluationStateData is being used + // for. + Operation walkOperation +} + +// InstanceKeyEvalData is used during evaluation to specify which values, +// if any, should be produced for count.index, each.key, and each.value. +type InstanceKeyEvalData struct { + // CountIndex is the value for count.index, or cty.NilVal if evaluating + // in a context where the "count" argument is not active. + // + // For correct operation, this should always be of type cty.Number if not + // nil. + CountIndex cty.Value + + // EachKey and EachValue are the values for each.key and each.value + // respectively, or cty.NilVal if evaluating in a context where the + // "for_each" argument is not active. These must either both be set + // or neither set. + // + // For correct operation, EachKey must always be either of type cty.String + // or cty.Number if not nil. + EachKey, EachValue cty.Value +} + +// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for +// evaluating in a context that has the given instance key. +func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData { + var countIdx cty.Value + var eachKey cty.Value + var eachVal cty.Value + + if intKey, ok := key.(addrs.IntKey); ok { + countIdx = cty.NumberIntVal(int64(intKey)) + } + + if stringKey, ok := key.(addrs.StringKey); ok { + eachKey = cty.StringVal(string(stringKey)) + eachVal = forEachMap[string(stringKey)] + } + + return InstanceKeyEvalData{ + CountIndex: countIdx, + EachKey: eachKey, + EachValue: eachVal, + } +} + +// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance +// key values at all, suitable for use in contexts where no keyed instance +// is relevant. +var EvalDataForNoInstanceKey = InstanceKeyEvalData{} + +// evaluationStateData must implement lang.Data +var _ lang.Data = (*evaluationStateData)(nil) + +func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "index": + idxVal := d.InstanceKeyData.CountIndex + if idxVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "count" in non-counted context`, + Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), + Subject: rng.ToHCL().Ptr(), + }) + return cty.UnknownVal(cty.Number), diags + } + return idxVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "count" attribute`, + Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. 
The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + var returnVal cty.Value + switch addr.Name { + + case "key": + returnVal = d.InstanceKeyData.EachKey + case "value": + returnVal = d.InstanceKeyData.EachValue + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "each" attribute`, + Detail: fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + if returnVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "each" in context without for_each`, + Detail: fmt.Sprintf(`The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`), + Subject: rng.ToHCL().Ptr(), + }) + return cty.UnknownVal(cty.DynamicPseudoType), diags + } + return returnVal, diags +} + +func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Variables[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Variables { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared input variable`, + Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + wantType := cty.DynamicPseudoType + if config.Type != cty.NilType { + wantType = config.Type + } + + d.Evaluator.VariableValuesLock.Lock() + defer d.Evaluator.VariableValuesLock.Unlock() + + // During the validate walk, input variables are always unknown so + // that we are validating the configuration for all possible input values + // rather than for a specific set. Checking against a specific set of + // input values then happens during the plan walk. 
+ // + // This is important because otherwise the validation walk will tend to be + // overly strict, requiring expressions throughout the configuration to + // be complicated to accommodate all possible inputs, whereas returning + // known here allows for simpler patterns like using input values as + // guards to broadly enable/disable resources, avoid processing things + // that are disabled, etc. Terraform's static validation leans towards + // being liberal in what it accepts because the subsequent plan walk has + // more information available and so can be more conservative. + if d.Operation == walkValidate { + return cty.UnknownVal(wantType), diags + } + + moduleAddrStr := d.ModulePath.String() + vals := d.Evaluator.VariableValues[moduleAddrStr] + if vals == nil { + return cty.UnknownVal(wantType), diags + } + + val, isSet := vals[addr.Name] + if !isSet { + if config.Default != cty.NilVal { + return config.Default, diags + } + return cty.UnknownVal(wantType), diags + } + + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + // We should never get here because this problem should've been caught + // during earlier validation, but we'll do something reasonable anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Incorrect variable type`, + Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), + Subject: &config.DeclRange, + }) + // Stub out our return value so that the semantic checker doesn't + // produce redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + return val, diags +} + +func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Locals[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Locals { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared local value`, + Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) + if val == cty.NilVal { + // Not evaluated yet? + val = cty.DynamicVal + } + + return val, diags +} + +func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. 
+ moduleAddr := addr.ModuleInstance(d.ModulePath) + + // We'll consult the configuration to see what output names we are + // expecting, so we can ensure the resulting object is of the expected + // type even if our data is incomplete for some reason. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since this should've been caught during + // static validation. + panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) + } + outputConfigs := moduleConfig.Module.Outputs + + vals := map[string]cty.Value{} + for n := range outputConfigs { + addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) + vals[n] = cty.DynamicVal + continue + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + vals[n] = change.After + } else { + os := d.Evaluator.State.OutputValue(addr) + if os == nil { + // Not evaluated yet? + vals[n] = cty.DynamicVal + continue + } + vals[n] = os.Value + } + } + return cty.ObjectVal(vals), diags +} + +func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + absAddr := addr.AbsOutputValue(d.ModulePath) + moduleAddr := absAddr.Module + + // First we'll consult the configuration to see if an output of this + // name is declared at all. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // this doesn't happen in normal circumstances due to our validation + // pass, but it can turn up in some unusual situations, like in the + // "terraform console" repl where arbitrary expressions can be + // evaluated. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + config := moduleConfig.Module.Outputs[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Outputs { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared output value`, + Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. 
(It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) + return cty.DynamicVal, diags + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + return change.After, diags + } + + os := d.Evaluator.State.OutputValue(absAddr) + if os == nil { + // Not evaluated yet? + return cty.DynamicVal, diags + } + + return os.Value, diags +} + +func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "cwd": + wd, err := os.Getwd() + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) + } + sourceDir := moduleConfig.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + case "root": + sourceDir := d.Evaluator.Config.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + default: + suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetResource(addr addrs.Resource, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // First we'll consult the configuration to see if an resource of this + // name is declared at all. + moduleAddr := d.ModulePath + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. 
+ panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) + } + + config := moduleConfig.Module.ResourceByAddr(addr) + if config == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Type, addr.Name, moduleDisplayAddr(moduleAddr)), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + rs := d.Evaluator.State.Resource(addr.Absolute(d.ModulePath)) + + if rs == nil { + // we must return DynamicVal so that both interpretations + // can proceed without generating errors, and we'll deal with this + // in a later step where more information is gathered. + // (In practice we should only end up here during the validate walk, + // since later walks should have at least partial states populated + // for all resources in the configuration.) + return cty.DynamicVal, diags + } + + // Break out early during validation, because resource may not be expanded + // yet and indexed references may show up as invalid. + if d.Operation == walkValidate { + return cty.DynamicVal, diags + } + + return d.getResourceInstancesAll(addr, rng, config, rs, rs.ProviderConfig) +} + +func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + instAddr := addrs.ResourceInstance{Resource: addr, Key: addrs.NoKey} + + schema := d.getResourceSchema(addr, providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + switch rs.EachMode { + case states.NoEach: + ty := schema.ImpliedType() + is := rs.Instances[addrs.NoKey] + if is == nil || is.Current == nil { + // Assume we're dealing with an instance that hasn't been created yet. + return cty.UnknownVal(ty), diags + } + + if is.Current.Status == states.ObjectPlanned { + // If there's a pending change for this instance in our plan, we'll prefer + // that. This is important because the state can't represent unknown values + // and so its data is inaccurate when changes are pending. + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr.Absolute(d.ModulePath), states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + return val, diags + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. 
This is a bug in Terraform; please report it.", addr), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + + return ios.Value, diags + + case states.EachList: + // We need to infer the length of our resulting tuple by searching + // for the max IntKey in our instances map. + length := 0 + for k := range rs.Instances { + if ik, ok := k.(addrs.IntKey); ok { + if int(ik) >= length { + length = int(ik) + 1 + } + } + } + + vals := make([]cty.Value, length) + for i := 0; i < length; i++ { + ty := schema.ImpliedType() + key := addrs.IntKey(i) + is, exists := rs.Instances[key] + if exists && is.Current != nil { + instAddr := addr.Instance(key).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = ios.Value + } else { + // There shouldn't normally be "gaps" in our list but we'll + // allow it under the assumption that we're in a weird situation + // where e.g. someone has run "terraform state mv" to reorder + // a list and left a hole behind. + vals[i] = cty.UnknownVal(schema.ImpliedType()) + } + } + + // We use a tuple rather than a list here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. 
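[Aside, not part of the vendored file: the tuple-versus-list rationale in the comment above is worth a concrete illustration. A cty list requires every element to share one type, whereas a tuple records a type per element, which is what lets two instances of the same "count" resource differ when the schema contains dynamically-typed attributes. A minimal, self-contained sketch using the go-cty value library used throughout this file; the attribute names are invented for illustration.]

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Two "instances" of the same resource whose dynamically-typed
	// attribute ended up with different runtime types after decoding.
	inst0 := cty.ObjectVal(map[string]cty.Value{
		"id":      cty.StringVal("i-0"),
		"payload": cty.StringVal("hello"), // a string in this instance...
	})
	inst1 := cty.ObjectVal(map[string]cty.Value{
		"id":      cty.StringVal("i-1"),
		"payload": cty.NumberIntVal(42), // ...but a number in this one
	})

	// A tuple can hold both values because each element carries its own
	// type; cty.ListVal, by contrast, requires a single shared element type
	// and so could not represent this pair of instances.
	vals := cty.TupleVal([]cty.Value{inst0, inst1})
	fmt.Println(vals.Type().FriendlyName())
}

[The ObjectVal used for the for_each (EachMap) case just below exists for the same reason: an object type, unlike a map type, records a distinct type per attribute.]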
+ return cty.TupleVal(vals), diags + + case states.EachMap: + ty := schema.ImpliedType() + vals := make(map[string]cty.Value, len(rs.Instances)) + for k, is := range rs.Instances { + if sk, ok := k.(addrs.StringKey); ok { + instAddr := addr.Instance(k).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = ios.Value + } + } + + // We use an object rather than a map here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.ObjectVal(vals), diags + + default: + // Should never happen since caller should deal with other modes + panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) + } +} + +func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { + providerType := providerAddr.ProviderConfig.Type + schemas := d.Evaluator.Schemas + schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + return schema +} + +func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "workspace": + workspaceName := d.Evaluator.Meta.Env + return cty.StringVal(workspaceName), diags + + case "env": + // Prior to Terraform 0.12 there was an attribute "env", which was + // an alias name for "workspace". This was deprecated and is now + // removed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. 
The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} + +// moduleDisplayAddr returns a string describing the given module instance +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleDisplayAddr(addr addrs.ModuleInstance) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go new file mode 100644 index 000000000..35a8be0c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/evaluate_valid.go @@ -0,0 +1,299 @@ +package terraform + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl/v2" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/helper/didyoumean" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// StaticValidateReferences checks the given references against schemas and +// other statically-checkable rules, producing error diagnostics if any +// problems are found. +// +// If this method returns errors for a particular reference then evaluating +// that reference is likely to generate a very similar error, so callers should +// not run this method and then also evaluate the source expression(s) and +// merge the two sets of diagnostics together, since this will result in +// confusing redundant errors. +// +// This method can find more errors than can be found by evaluating an +// expression with a partially-populated scope, since it checks the referenced +// names directly against the schema rather than relying on evaluation errors. +// +// The result may include warning diagnostics if, for example, deprecated +// features are referenced. 
+func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, ref := range refs { + moreDiags := d.staticValidateReference(ref, self) + diags = diags.Append(moreDiags) + } + return diags +} + +func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if modCfg == nil { + // This is a bug in the caller rather than a problem with the + // reference, but rather than crashing out here in an unhelpful way + // we'll just ignore it and trust a different layer to catch it. + return nil + } + + if ref.Subject == addrs.Self { + // The "self" address is a special alias for the address given as + // our self parameter here, if present. + if self == nil { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + return diags + } + + synthRef := *ref // shallow copy + synthRef.Subject = self + ref = &synthRef + } + + switch addr := ref.Subject.(type) { + + // For static validation we validate both resource and resource instance references the same way. + // We mostly disregard the index, though we do some simple validation of + // its _presence_ in staticValidateSingleResourceReference and + // staticValidateMultiResourceReference respectively. + case addrs.Resource: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + return diags + case addrs.ResourceInstance: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) + return diags + + // We also handle all module call references the same way, disregarding index. + case addrs.ModuleCall: + return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstance: + return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallOutput: + // This one is a funny one because we will take the output name referenced + // and use it to fake up a "remaining" that would make sense for the + // module call itself, rather than for the specific output, and then + // we can just re-use our static module call validation logic. + remain := make(hcl.Traversal, len(ref.Remaining)+1) + copy(remain[1:], ref.Remaining) + remain[0] = hcl.TraverseAttr{ + Name: addr.Name, + + // Using the whole reference as the source range here doesn't exactly + // match how HCL would normally generate an attribute traversal, + // but is close enough for our purposes. 
+ SrcRange: ref.SourceRange.ToHCL(), + } + return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) + + default: + // Anything else we'll just permit through without any static validation + // and let it be caught during dynamic evaluation, in evaluate.go . + return nil + } +} + +func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + // If we have at least one step in "remain" and this resource has + // "count" set then we know for sure this in invalid because we have + // something like: + // aws_instance.foo.bar + // ...when we really need + // aws_instance.foo[count.index].bar + + // It is _not_ safe to do this check when remain is empty, because that + // would also match aws_instance.foo[count.index].bar due to `count.index` + // not being statically-resolvable as part of a reference, and match + // direct references to the whole aws_instance.foo tuple. + if len(remain) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if cfg.Count != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + if cfg.ForEach != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + + return diags +} + +func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if addr.Key == addrs.NoKey { + // This is a different path into staticValidateSingleResourceReference + return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) + } else { + if cfg.Count == nil && cfg.ForEach == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unexpected resource instance key`, + Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. 
Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + return diags +} + +func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + var modeAdjective string + switch addr.Mode { + case addrs.ManagedResourceMode: + modeAdjective = "managed" + case addrs.DataResourceMode: + modeAdjective = "data" + default: + // should never happen + modeAdjective = "" + } + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // Normally accessing this directly is wrong because it doesn't take into + // account provider inheritance, etc but it's okay here because we're only + // paying attention to the type anyway. + providerType := cfg.ProviderConfigAddr().Type + schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + + if schema == nil { + // Prior validation should've taken care of a resource block with an + // unsupported type, so we should never get here but we'll handle it + // here anyway for robustness. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource type`, + Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // As a special case we'll detect attempts to access an attribute called + // "count" and produce a special error for it, since versions of Terraform + // prior to v0.12 offered this as a weird special case that we can no + // longer support. + if len(remain) > 0 { + if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource count attribute`, + Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + } + + // If we got this far then we'll try to validate the remaining traversal + // steps against our schema. + moreDiags := schema.StaticValidateTraversal(remain) + diags = diags.Append(moreDiags) + + return diags +} + +func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // For now, our focus here is just in testing that the referenced module + // call exists. All other validation is deferred until evaluation time. 
+ _, exists := modCfg.Module.ModuleCalls[addr.Name] + if !exists { + var suggestions []string + for name := range modCfg.Module.ModuleCalls { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + return diags +} + +// moduleConfigDisplayAddr returns a string describing the given module +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. +func moduleConfigDisplayAddr(addr addrs.Module) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go new file mode 100644 index 000000000..97c77bdbd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go new file mode 100644 index 000000000..36e295b6f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph.go @@ -0,0 +1,141 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// Graph represents the graph that Terraform uses to represent resources +// and their dependencies. +type Graph struct { + // Graph is the actual DAG. This is embedded so you can call the DAG + // methods directly. + dag.AcyclicGraph + + // Path is the path in the module tree that this Graph represents. + Path addrs.ModuleInstance + + // debugName is a name for reference in the debug output. This is usually + // to indicate what topmost builder was, and if this graph is a shadow or + // not. + debugName string +} + +func (g *Graph) DirectedGraph() dag.Grapher { + return &g.AcyclicGraph +} + +// Walk walks the graph with the given walker for callbacks. The graph +// will be walked with full parallelism, so the walker should expect +// to be called in concurrently. +func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { + return g.walk(walker) +} + +func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { + // The callbacks for enter/exiting a graph + ctx := walker.EnterPath(g.Path) + defer walker.ExitPath(g.Path) + + // Get the path for logs + path := ctx.Path().String() + + debugName := "walk-graph.json" + if g.debugName != "" { + debugName = g.debugName + "-" + debugName + } + + // Walk the graph. 
+ var walkFn dag.WalkFunc + walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { + log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) + g.DebugVisitInfo(v, g.debugName) + + defer func() { + log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) + }() + + walker.EnterVertex(v) + defer walker.ExitVertex(v, diags) + + // vertexCtx is the context that we use when evaluating. This + // is normally the context of our graph but can be overridden + // with a GraphNodeSubPath impl. + vertexCtx := ctx + if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { + vertexCtx = walker.EnterPath(pn.Path()) + defer walker.ExitPath(pn.Path()) + } + + // If the node is eval-able, then evaluate it. + if ev, ok := v.(GraphNodeEvalable); ok { + tree := ev.EvalTree() + if tree == nil { + panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) + } + + // Allow the walker to change our tree if needed. Eval, + // then callback with the output. + log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) + + tree = walker.EnterEvalTree(v, tree) + output, err := Eval(tree, vertexCtx) + diags = diags.Append(walker.ExitEvalTree(v, output, err)) + if diags.HasErrors() { + return + } + } + + // If the node is dynamically expanded, then expand it + if ev, ok := v.(GraphNodeDynamicExpandable); ok { + log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) + + g, err := ev.DynamicExpand(vertexCtx) + if err != nil { + diags = diags.Append(err) + return + } + if g != nil { + // Walk the subgraph + log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) + subDiags := g.walk(walker) + diags = diags.Append(subDiags) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) + return + } + log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) + } + } + + // If the node has a subgraph, then walk the subgraph + if sn, ok := v.(GraphNodeSubgraph); ok { + log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) + + subDiags := sn.Subgraph().(*Graph).walk(walker) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) + return + } + log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) + } + + return + } + + return g.AcyclicGraph.Walk(walkFn) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go new file mode 100644 index 000000000..ee2c5857a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder.go @@ -0,0 +1,85 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// GraphBuilder is an interface that can be implemented and used with +// Terraform to build the graph that Terraform walks. +type GraphBuilder interface { + // Build builds the graph for the given module path. 
It is up to + // the interface implementation whether this build should expand + // the graph or not. + Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) +} + +// BasicGraphBuilder is a GraphBuilder that builds a graph out of a +// series of transforms and (optionally) validates the graph is a valid +// structure. +type BasicGraphBuilder struct { + Steps []GraphTransformer + Validate bool + // Optional name to add to the graph debug log + Name string +} + +func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + g := &Graph{Path: path} + + var lastStepStr string + for _, step := range b.Steps { + if step == nil { + continue + } + log.Printf("[TRACE] Executing graph transform %T", step) + + stepName := fmt.Sprintf("%T", step) + dot := strings.LastIndex(stepName, ".") + if dot >= 0 { + stepName = stepName[dot+1:] + } + + debugOp := g.DebugOperation(stepName, "") + err := step.Transform(g) + + errMsg := "" + if err != nil { + errMsg = err.Error() + } + debugOp.End(errMsg) + + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] Completed graph transform %T (no changes)", step) + } + + if err != nil { + if nf, isNF := err.(tfdiags.NonFatalError); isNF { + diags = diags.Append(nf.Diagnostics) + } else { + diags = diags.Append(err) + return g, diags + } + } + } + + // Validate the graph structure + if b.Validate { + if err := g.Validate(); err != nil { + log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) + diags = diags.Append(err) + return nil, diags + } + } + + return g, diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go new file mode 100644 index 000000000..918987610 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_apply.go @@ -0,0 +1,211 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ApplyGraphBuilder implements GraphBuilder and is responsible for building +// a graph for applying a Terraform diff. +// +// Because the graph is built from the diff (vs. the config or state), +// this helps ensure that the apply-time graph doesn't modify any resources +// that aren't explicitly in the diff. There are other scenarios where the +// diff can be deviated, so this is just one layer of protection. +type ApplyGraphBuilder struct { + // Config is the configuration tree that the diff was built from. + Config *configs.Config + + // Changes describes the changes that we need apply. + Changes *plans.Changes + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target. 
This is only required to make sure + // unnecessary outputs aren't included in the apply graph. The plan + // builder successfully handles targeting resources. In the future, + // outputs should go into the diff so that this is unnecessary. + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Destroy, if true, represents a pure destroy operation + Destroy bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "ApplyGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeApplyableResource{ + NodeAbstractResource: a, + } + } + + concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeDestroyResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config. During apply, + // we use this just to ensure that the whole-resource metadata is + // updated to reflect things such as whether the count argument is + // set in config, or which provider configuration manages each resource. + &ConfigTransformer{ + Concrete: concreteResource, + Config: b.Config, + }, + + // Creates all the resource instances represented in the diff, along + // with dependency edges against the whole-resource nodes added by + // ConfigTransformer above. + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: b.State, + Changes: b.Changes, + }, + + // Creates extra cleanup nodes for any entire resources that are + // no longer present in config, so we can make sure we clean up the + // leftover empty resource states after the instances have been + // destroyed. + // (We don't track this particular type of change in the plan because + // it's just cleanup of our own state object, and so doesn't effect + // any real remote objects or consumable outputs.) 
+ &OrphanResourceTransformer{ + Concrete: concreteOrphanResource, + Config: b.Config, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{Config: b.Config, State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Provisioner-related transformations + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + // add providers + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Destruction ordering + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, + + &CBDEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + Destroy: b.Destroy, + }, + + // Handle destroy time transformations for output and local values. + // Reverse the edges from outputs and locals, so that + // interpolations don't fail during destroy. + // Create a destroy node for outputs to remove them from the state. + // Prune unreferenced values, which may have interpolations that can't + // be resolved. + GraphTransformIf( + func() bool { return b.Destroy }, + GraphTransformMulti( + &DestroyValueReferenceTransformer{}, + &DestroyOutputTransformer{}, + &PruneUnusedValuesTransformer{}, + ), + ), + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{ + Config: b.Config, + }, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go new file mode 100644 index 000000000..32fe5f973 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_destroy_plan.go @@ -0,0 +1,97 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for +// planning a pure-destroy. +// +// Planning a pure destroy operation is simple because we can ignore most +// ordering configuration and simply reverse the state. 
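[Aside, illustrative only and not part of the vendored SDK: ApplyGraphBuilder above and DestroyPlanGraphBuilder below follow the same shape. Each concrete builder only assembles an ordered slice of GraphTransformer steps and hands them to BasicGraphBuilder, which applies the steps in sequence and optionally validates the result. The toy types in this sketch (graph, transformer, addNode, validateNonEmpty, basicBuilder) are stand-ins invented for illustration, not the SDK's real types.]

package main

import (
	"errors"
	"fmt"
)

// graph is a stand-in for the SDK's Graph type.
type graph struct {
	nodes []string
}

// transformer is a stand-in for GraphTransformer: each step mutates the graph.
type transformer interface {
	Transform(g *graph) error
}

// addNode is a toy "config"-style step that adds a single node.
type addNode struct{ name string }

func (t addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

// validateNonEmpty is a toy structural check, loosely akin to the Validate flag.
type validateNonEmpty struct{}

func (validateNonEmpty) Transform(g *graph) error {
	if len(g.nodes) == 0 {
		return errors.New("graph has no nodes")
	}
	return nil
}

// basicBuilder mirrors the BasicGraphBuilder pattern in this sketch: apply
// steps in order, skip nil steps, stop on the first error.
type basicBuilder struct {
	steps []transformer
}

func (b basicBuilder) Build() (*graph, error) {
	g := &graph{}
	for _, step := range b.steps {
		if step == nil {
			continue
		}
		if err := step.Transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	// A concrete builder differs only in which steps it chooses.
	b := basicBuilder{steps: []transformer{
		addNode{"root"},
		addNode{"aws_instance.example"},
		nil, // nil steps are tolerated, as in the real builder
		validateNonEmpty{},
	}}
	g, err := b.Build()
	fmt.Println(g, err)
}

[The concrete builders in this package differ mainly in which transformer steps they include and in pass-through options such as Validate and DisableReduce.]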
+type DestroyPlanGraphBuilder struct { + // Config is the configuration tree to build the plan from. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "DestroyPlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlanDestroyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates nodes for the resource instances tracked in the state. + &StateTransformer{ + ConcreteCurrent: concreteResourceInstance, + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Destruction ordering. We require this only so that + // targeting below will prune the correct things. + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, + + // Target. Note we don't set "Destroy: true" here since we already + // created proper destroy ordering. + &TargetsTransformer{Targets: b.Targets}, + + // Single root + &RootTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go new file mode 100644 index 000000000..8a0bcf5ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_eval.go @@ -0,0 +1,108 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable +// for evaluating in-memory values (input variables, local values, output +// values) in the state without any other side-effects. +// +// This graph is used only in weird cases, such as the "terraform console" +// CLI command, where we need to evaluate expressions against the state +// without taking any other actions. 
+// +// The generated graph will include nodes for providers, resources, etc +// just to allow indirect dependencies to be resolved, but these nodes will +// not take any actions themselves since we assume that their parts of the +// state, if any, are already complete. +// +// Although the providers are never configured, they must still be available +// in order to obtain schema information used for type checking, etc. +type EvalGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "EvalGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Steps() []GraphTransformer { + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeEvalableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Concrete: nil, // just use the abstract type + Config: b.Config, + Unique: true, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Although we don't configure providers, we do still start them up + // to get their schemas, and so we must shut them down again here. + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Remove redundant edges to simplify the graph. + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go new file mode 100644 index 000000000..dcbb10e60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_import.go @@ -0,0 +1,100 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportGraphBuilder implements GraphBuilder and is responsible for building +// a graph for importing resources into Terraform. 
This is a much, much +// simpler graph than a normal configuration graph. +type ImportGraphBuilder struct { + // ImportTargets are the list of resources to import. + ImportTargets []*ImportTarget + + // Module is a configuration to build the graph from. See ImportOpts.Config. + Config *configs.Config + + // Components is the factory for our available plugin components. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// Build builds the graph according to the steps returned by Steps. +func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "ImportGraphBuilder", + }).Build(path) +} + +// Steps returns the ordered list of GraphTransformers that must be executed +// to build a complete graph. +func (b *ImportGraphBuilder) Steps() []GraphTransformer { + // Get the module. If we don't have one, we just use an empty tree + // so that the transform still works but does nothing. + config := b.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Create all our resources from the configuration and state + &ConfigTransformer{Config: config}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add the import steps + &ImportStateTransformer{Targets: b.ImportTargets}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), + + // This validates that the providers only depend on variables + &ImportProviderValidateTransformer{}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Optimize + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go new file mode 100644 index 000000000..bcd119b39 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_plan.go @@ -0,0 +1,204 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// PlanGraphBuilder implements GraphBuilder and is responsible for building +// a graph for planning (creating a Terraform Diff). 
+// +// The primary difference between this graph and others: +// +// * Based on the config since it represents the target state +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type PlanGraphBuilder struct { + // Config is the configuration tree to build a plan from. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool + + // CustomConcrete can be set to customize the node types created + // for various parts of the plan. This is useful in order to customize + // the plan behavior. + CustomConcrete bool + ConcreteProvider ConcreteProviderNodeFunc + ConcreteResource ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc + + once sync.Once +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "PlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Steps() []GraphTransformer { + b.once.Do(b.init) + + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config + &ConfigTransformer{ + Concrete: b.ConcreteResource, + Config: b.Config, + }, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add orphan resources + &OrphanResourceInstanceTransformer{ + Concrete: b.ConcreteResourceOrphan, + State: b.State, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can plan to destroy them. (This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) 
+ &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{ + Config: b.Config, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, + + // Add module variables + &ModuleVariableTransformer{ + Config: b.Config, + }, + + TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{ + Config: b.Config, + }, + + // Target + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. + &ForcedCBDTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} + +func (b *PlanGraphBuilder) init() { + // Do nothing if the user requests customizing the fields + if b.CustomConcrete { + return + } + + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResource{ + NodeAbstractResource: a, + } + } + + b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go new file mode 100644 index 000000000..fad7bf161 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_refresh.go @@ -0,0 +1,194 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// RefreshGraphBuilder implements GraphBuilder and is responsible for building +// a graph for refreshing (updating the Terraform state). 
+// +// The primary difference between this graph and others: +// +// * Based on the state since it represents the only resources that +// need to be refreshed. +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type RefreshGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the prior state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas + + // Targets are resources to target + Targets []addrs.Targetable + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "RefreshGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResource{ + NodeAbstractResource: a, + } + } + + concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + // The "Plan" node type also handles refreshing behavior. + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableDataResource{ + NodeAbstractResource: a, + } + } + + steps := []GraphTransformer{ + // Creates all the managed resources that aren't in the state, but only if + // we have a state already. No resources in state means there's not + // anything to refresh. + func() GraphTransformer { + if b.State.HasResources() { + return &ConfigTransformer{ + Concrete: concreteManagedResource, + Config: b.Config, + Unique: true, + ModeFilter: true, + Mode: addrs.ManagedResourceMode, + } + } + log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") + return nil + }(), + + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Concrete: concreteDataResource, + Config: b.Config, + Unique: true, + ModeFilter: true, + Mode: addrs.DataResourceMode, + }, + + // Add any fully-orphaned resources from config (ones that have been + // removed completely, not ones that are just orphaned due to a scaled-in + // count. + &OrphanResourceInstanceTransformer{ + Concrete: concreteManagedResourceInstance, + State: b.State, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can check if they still exist. 
(This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) + &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Target + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go new file mode 100644 index 000000000..0aa8b915a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_builder_validate.go @@ -0,0 +1,34 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ValidateGraphBuilder creates the graph for the validate operation. +// +// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that +// we only have to validate what we'd normally plan anyways. The +// PlanGraphBuilder given will be modified so it shouldn't be used for anything +// else after calling this function. +func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { + // We're going to customize the concrete functions + p.CustomConcrete = true + + // Set the provider to the normal provider. This will ask for input. + p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodeValidatableResource{ + NodeAbstractResource: a, + } + } + + // We purposely don't set any other concrete types since they don't + // require validation. 
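+	// (CustomConcrete is set above, so PlanGraphBuilder.init leaves the
+	// remaining concrete functions nil; transformers that receive a nil
+	// concrete function are expected to fall back to the abstract node
+	// types, which is sufficient for validation.)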
+ + return p +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go new file mode 100644 index 000000000..5dbf415ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_dot.go @@ -0,0 +1,9 @@ +package terraform + +import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + +// GraphDot returns the dot formatting of a visual representation of +// the given Terraform graph. +func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) { + return string(g.Dot(opts)), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go new file mode 100644 index 000000000..a005ea5a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_interface_subgraph.go @@ -0,0 +1,11 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// GraphNodeSubPath says that a node is part of a graph with a +// different path, and the context should be adjusted accordingly. +type GraphNodeSubPath interface { + Path() addrs.ModuleInstance +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go new file mode 100644 index 000000000..d699376f2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk.go @@ -0,0 +1,32 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// GraphWalker is an interface that can be implemented that when used +// with Graph.Walk will invoke the given callbacks under certain events. +type GraphWalker interface { + EnterPath(addrs.ModuleInstance) EvalContext + ExitPath(addrs.ModuleInstance) + EnterVertex(dag.Vertex) + ExitVertex(dag.Vertex, tfdiags.Diagnostics) + EnterEvalTree(dag.Vertex, EvalNode) EvalNode + ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics +} + +// NullGraphWalker is a GraphWalker implementation that does nothing. +// This can be embedded within other GraphWalker implementations for easily +// implementing all the required functions. 
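+//
+// A minimal sketch of that embedding pattern (illustrative only; loggingWalker
+// is not part of this package):
+//
+//	type loggingWalker struct {
+//		NullGraphWalker
+//	}
+//
+//	func (loggingWalker) ExitVertex(v dag.Vertex, diags tfdiags.Diagnostics) {
+//		log.Printf("[TRACE] finished %s", dag.VertexName(v))
+//	}
+//
+// The embedded NullGraphWalker supplies the remaining GraphWalker methods.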
+type NullGraphWalker struct{} + +func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} +func (NullGraphWalker) EnterVertex(dag.Vertex) {} +func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} +func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } +func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go new file mode 100644 index 000000000..11fb2fd01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_context.go @@ -0,0 +1,157 @@ +package terraform + +import ( + "context" + "log" + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ContextGraphWalker is the GraphWalker implementation used with the +// Context struct to walk and evaluate the graph. +type ContextGraphWalker struct { + NullGraphWalker + + // Configurable values + Context *Context + State *states.SyncState // Used for safe concurrent access to state + Changes *plans.ChangesSync // Used for safe concurrent writes to changes + Operation walkOperation + StopContext context.Context + RootVariableValues InputValues + + // This is an output. Do not set this, nor read it while a graph walk + // is in progress. + NonFatalDiagnostics tfdiags.Diagnostics + + errorLock sync.Mutex + once sync.Once + contexts map[string]*BuiltinEvalContext + contextLock sync.Mutex + variableValues map[string]map[string]cty.Value + variableValuesLock sync.Mutex + providerCache map[string]providers.Interface + providerSchemas map[string]*ProviderSchema + providerLock sync.Mutex + provisionerCache map[string]provisioners.Interface + provisionerSchemas map[string]*configschema.Block + provisionerLock sync.Mutex +} + +func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { + w.once.Do(w.init) + + w.contextLock.Lock() + defer w.contextLock.Unlock() + + // If we already have a context for this path cached, use that + key := path.String() + if ctx, ok := w.contexts[key]; ok { + return ctx + } + + // Our evaluator shares some locks with the main context and the walker + // so that we can safely run multiple evaluations at once across + // different modules. 
+ evaluator := &Evaluator{ + Meta: w.Context.meta, + Config: w.Context.config, + Operation: w.Operation, + State: w.State, + Changes: w.Changes, + Schemas: w.Context.schemas, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + } + + ctx := &BuiltinEvalContext{ + StopContext: w.StopContext, + PathValue: path, + Hooks: w.Context.hooks, + InputValue: w.Context.uiInput, + Components: w.Context.components, + Schemas: w.Context.schemas, + ProviderCache: w.providerCache, + ProviderInputConfig: w.Context.providerInputConfig, + ProviderLock: &w.providerLock, + ProvisionerCache: w.provisionerCache, + ProvisionerLock: &w.provisionerLock, + ChangesValue: w.Changes, + StateValue: w.State, + Evaluator: evaluator, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, + } + + w.contexts[key] = ctx + return ctx +} + +func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { + log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) + + // Acquire a lock on the semaphore + w.Context.parallelSem.Acquire() + + // We want to filter the evaluation tree to only include operations + // that belong in this operation. + return EvalFilter(n, EvalNodeFilterOp(w.Operation)) +} + +func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { + log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) + + // Release the semaphore + w.Context.parallelSem.Release() + + if err == nil { + return nil + } + + // Acquire the lock because anything is going to require a lock. + w.errorLock.Lock() + defer w.errorLock.Unlock() + + // If the error is non-fatal then we'll accumulate its diagnostics in our + // non-fatal list, rather than returning it directly, so that the graph + // walk can continue. + if nferr, ok := err.(tfdiags.NonFatalError); ok { + log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) + w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) + return nil + } + + // Otherwise, we'll let our usual diagnostics machinery figure out how to + // unpack this as one or more diagnostic messages and return that. If we + // get down here then the returned diagnostics will contain at least one + // error, causing the graph walk to halt. + var diags tfdiags.Diagnostics + diags = diags.Append(err) + return diags +} + +func (w *ContextGraphWalker) init() { + w.contexts = make(map[string]*BuiltinEvalContext) + w.providerCache = make(map[string]providers.Interface) + w.providerSchemas = make(map[string]*ProviderSchema) + w.provisionerCache = make(map[string]provisioners.Interface) + w.provisionerSchemas = make(map[string]*configschema.Block) + w.variableValues = make(map[string]map[string]cty.Value) + + // Populate root module variable values. Other modules will be populated + // during the graph walk. 
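+	// (The "" key used below is the string form of the root module path,
+	// i.e. what addrs.RootModuleInstance.String() is expected to return.)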
+ w.variableValues[""] = make(map[string]cty.Value) + for k, iv := range w.RootVariableValues { + w.variableValues[""][k] = iv.Value + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go new file mode 100644 index 000000000..859f6fb12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graph_walk_operation.go @@ -0,0 +1,18 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=walkOperation graph_walk_operation.go + +// walkOperation is an enum which tells the walkContext what to do. +type walkOperation byte + +const ( + walkInvalid walkOperation = iota + walkApply + walkPlan + walkPlanDestroy + walkRefresh + walkValidate + walkDestroy + walkImport + walkEval // used just to prepare EvalContext for expression evaluation, with no other actions +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go new file mode 100644 index 000000000..b51e1a266 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/graphtype_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[GraphTypeInvalid-0] + _ = x[GraphTypeLegacy-1] + _ = x[GraphTypeRefresh-2] + _ = x[GraphTypePlan-3] + _ = x[GraphTypePlanDestroy-4] + _ = x[GraphTypeApply-5] + _ = x[GraphTypeValidate-6] + _ = x[GraphTypeEval-7] +} + +const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" + +var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} + +func (i GraphType) String() string { + if i >= GraphType(len(_GraphType_index)-1) { + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go new file mode 100644 index 000000000..b5be94824 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook.go @@ -0,0 +1,145 @@ +package terraform + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// HookAction is an enum of actions that can be taken as a result of a hook +// callback. This allows you to modify the behavior of Terraform at runtime. +type HookAction byte + +const ( + // HookActionContinue continues with processing as usual. + HookActionContinue HookAction = iota + + // HookActionHalt halts immediately: no more hooks are processed + // and the action that Terraform was about to take is cancelled. + HookActionHalt +) + +// Hook is the interface that must be implemented to hook into various +// parts of Terraform, allowing you to inspect or change behavior at runtime. +// +// There are MANY hook points into Terraform. 
If you only want to implement +// some hook points, but not all (which is the likely case), then embed the +// NilHook into your struct, which implements all of the interface but does +// nothing. Then, override only the functions you want to implement. +type Hook interface { + // PreApply and PostApply are called before and after an action for a + // single instance is applied. The error argument in PostApply is the + // error, if any, that was returned from the provider Apply call itself. + PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) + + // PreDiff and PostDiff are called before and after a provider is given + // the opportunity to customize the proposed new state to produce the + // planned new state. + PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) + PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + + // The provisioning hooks signal both the overall start end end of + // provisioning for a particular instance and of each of the individual + // configured provisioners for each instance. The sequence of these + // for a given instance might look something like this: + // + // PreProvisionInstance(aws_instance.foo[1], ...) + // PreProvisionInstanceStep(aws_instance.foo[1], "file") + // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) + // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") + // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) + // PostProvisionInstance(aws_instance.foo[1], ...) + // + // ProvisionOutput is called with output sent back by the provisioners. + // This will be called multiple times as output comes in, with each call + // representing one line of output. It cannot control whether the + // provisioner continues running. + PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) + PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) + ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) + + // PreRefresh and PostRefresh are called before and after a single + // resource state is refreshed, respectively. + PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) + PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) + + // PreImportState and PostImportState are called before and after + // (respectively) each state import operation for a given resource address. + PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PostStateUpdate is called each time the state is updated. 
It receives + // a deep copy of the state, which it may therefore access freely without + // any need for locks to protect from concurrent writes from the caller. + PostStateUpdate(new *states.State) (HookAction, error) +} + +// NilHook is a Hook implementation that does nothing. It exists only to +// simplify implementing hooks. You can embed this into your Hook implementation +// and only implement the functions you are interested in. +type NilHook struct{} + +var _ Hook = (*NilHook)(nil) + +func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { + return HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go new file mode 100644 index 000000000..74a29bde0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_mock.go @@ -0,0 +1,274 @@ +package terraform + +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// MockHook is an implementation of Hook that can be used for tests. +// It records all of its function calls. 
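+//
+// A rough usage sketch (assumed test wiring, not part of this file):
+//
+//	h := new(MockHook)
+//	// ... run a walk or operation with h registered as a Hook ...
+//	if !h.PreApplyCalled {
+//		t.Fatal("expected PreApply to be called")
+//	}
+//
+// Each method records its arguments into the correspondingly named fields and
+// returns the configurable *Return/*Error values.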
+type MockHook struct { + sync.Mutex + + PreApplyCalled bool + PreApplyAddr addrs.AbsResourceInstance + PreApplyGen states.Generation + PreApplyAction plans.Action + PreApplyPriorState cty.Value + PreApplyPlannedState cty.Value + PreApplyReturn HookAction + PreApplyError error + + PostApplyCalled bool + PostApplyAddr addrs.AbsResourceInstance + PostApplyGen states.Generation + PostApplyNewState cty.Value + PostApplyError error + PostApplyReturn HookAction + PostApplyReturnError error + PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) + + PreDiffCalled bool + PreDiffAddr addrs.AbsResourceInstance + PreDiffGen states.Generation + PreDiffPriorState cty.Value + PreDiffProposedState cty.Value + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffAddr addrs.AbsResourceInstance + PostDiffGen states.Generation + PostDiffAction plans.Action + PostDiffPriorState cty.Value + PostDiffPlannedState cty.Value + PostDiffReturn HookAction + PostDiffError error + + PreProvisionInstanceCalled bool + PreProvisionInstanceAddr addrs.AbsResourceInstance + PreProvisionInstanceState cty.Value + PreProvisionInstanceReturn HookAction + PreProvisionInstanceError error + + PostProvisionInstanceCalled bool + PostProvisionInstanceAddr addrs.AbsResourceInstance + PostProvisionInstanceState cty.Value + PostProvisionInstanceReturn HookAction + PostProvisionInstanceError error + + PreProvisionInstanceStepCalled bool + PreProvisionInstanceStepAddr addrs.AbsResourceInstance + PreProvisionInstanceStepProvisionerType string + PreProvisionInstanceStepReturn HookAction + PreProvisionInstanceStepError error + + PostProvisionInstanceStepCalled bool + PostProvisionInstanceStepAddr addrs.AbsResourceInstance + PostProvisionInstanceStepProvisionerType string + PostProvisionInstanceStepErrorArg error + PostProvisionInstanceStepReturn HookAction + PostProvisionInstanceStepError error + + ProvisionOutputCalled bool + ProvisionOutputAddr addrs.AbsResourceInstance + ProvisionOutputProvisionerType string + ProvisionOutputMessage string + + PreRefreshCalled bool + PreRefreshAddr addrs.AbsResourceInstance + PreRefreshGen states.Generation + PreRefreshPriorState cty.Value + PreRefreshReturn HookAction + PreRefreshError error + + PostRefreshCalled bool + PostRefreshAddr addrs.AbsResourceInstance + PostRefreshGen states.Generation + PostRefreshPriorState cty.Value + PostRefreshNewState cty.Value + PostRefreshReturn HookAction + PostRefreshError error + + PreImportStateCalled bool + PreImportStateAddr addrs.AbsResourceInstance + PreImportStateID string + PreImportStateReturn HookAction + PreImportStateError error + + PostImportStateCalled bool + PostImportStateAddr addrs.AbsResourceInstance + PostImportStateNewStates []providers.ImportedResource + PostImportStateReturn HookAction + PostImportStateError error + + PostStateUpdateCalled bool + PostStateUpdateState *states.State + PostStateUpdateReturn HookAction + PostStateUpdateError error +} + +var _ Hook = (*MockHook)(nil) + +func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreApplyCalled = true + h.PreApplyAddr = addr + h.PreApplyGen = gen + h.PreApplyAction = action + h.PreApplyPriorState = priorState + h.PreApplyPlannedState = plannedNewState + return h.PreApplyReturn, h.PreApplyError +} + +func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen 
states.Generation, newState cty.Value, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostApplyCalled = true + h.PostApplyAddr = addr + h.PostApplyGen = gen + h.PostApplyNewState = newState + h.PostApplyError = err + + if h.PostApplyFn != nil { + return h.PostApplyFn(addr, gen, newState, err) + } + + return h.PostApplyReturn, h.PostApplyReturnError +} + +func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreDiffCalled = true + h.PreDiffAddr = addr + h.PreDiffGen = gen + h.PreDiffPriorState = priorState + h.PreDiffProposedState = proposedNewState + return h.PreDiffReturn, h.PreDiffError +} + +func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostDiffCalled = true + h.PostDiffAddr = addr + h.PostDiffGen = gen + h.PostDiffAction = action + h.PostDiffPriorState = priorState + h.PostDiffPlannedState = plannedNewState + return h.PostDiffReturn, h.PostDiffError +} + +func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionInstanceCalled = true + h.PreProvisionInstanceAddr = addr + h.PreProvisionInstanceState = state + return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError +} + +func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionInstanceCalled = true + h.PostProvisionInstanceAddr = addr + h.PostProvisionInstanceState = state + return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError +} + +func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionInstanceStepCalled = true + h.PreProvisionInstanceStepAddr = addr + h.PreProvisionInstanceStepProvisionerType = typeName + return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError +} + +func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionInstanceStepCalled = true + h.PostProvisionInstanceStepAddr = addr + h.PostProvisionInstanceStepProvisionerType = typeName + h.PostProvisionInstanceStepErrorArg = err + return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError +} + +func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { + h.Lock() + defer h.Unlock() + + h.ProvisionOutputCalled = true + h.ProvisionOutputAddr = addr + h.ProvisionOutputProvisionerType = typeName + h.ProvisionOutputMessage = line +} + +func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreRefreshCalled = true + h.PreRefreshAddr = addr + h.PreRefreshGen = gen + h.PreRefreshPriorState = priorState + return h.PreRefreshReturn, h.PreRefreshError +} + +func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostRefreshCalled = true + h.PostRefreshAddr = addr + h.PostRefreshPriorState = priorState + h.PostRefreshNewState = newState + 
return h.PostRefreshReturn, h.PostRefreshError +} + +func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreImportStateCalled = true + h.PreImportStateAddr = addr + h.PreImportStateID = importID + return h.PreImportStateReturn, h.PreImportStateError +} + +func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostImportStateCalled = true + h.PostImportStateAddr = addr + h.PostImportStateNewStates = imported + return h.PostImportStateReturn, h.PostImportStateError +} + +func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostStateUpdateCalled = true + h.PostStateUpdateState = new + return h.PostStateUpdateReturn, h.PostStateUpdateError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go new file mode 100644 index 000000000..42c3d20cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/hook_stop.go @@ -0,0 +1,100 @@ +package terraform + +import ( + "sync/atomic" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// stopHook is a private Hook implementation that Terraform uses to +// signal when to stop or cancel actions. +type stopHook struct { + stop uint32 +} + +var _ Hook = (*stopHook)(nil) + +func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { +} + +func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { 
+ return h.hook() +} + +func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) hook() (HookAction, error) { + if h.Stopped() { + // FIXME: This should really return an error since stopping partway + // through is not a successful run-to-completion, but we'll need to + // introduce that cautiously since existing automation solutions may + // be depending on this behavior. + return HookActionHalt, nil + } + + return HookActionContinue, nil +} + +// reset should be called within the lock context +func (h *stopHook) Reset() { + atomic.StoreUint32(&h.stop, 0) +} + +func (h *stopHook) Stop() { + atomic.StoreUint32(&h.stop, 1) +} + +func (h *stopHook) Stopped() bool { + return atomic.LoadUint32(&h.stop) == 1 +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go new file mode 100644 index 000000000..375a8638a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances store in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go new file mode 100644 index 000000000..95b7a9802 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypePrimary-1] + _ = x[TypeTainted-2] + _ = x[TypeDeposed-3] +} + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go new file mode 100644 index 000000000..f1434e625 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/module_dependencies.go @@ -0,0 +1,202 @@ +package terraform + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/moduledeps" + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ConfigTreeDependencies returns the dependencies of the tree of modules +// described by the given configuration and state. +// +// Both configuration and state are required because there can be resources +// implied by instances in the state that no longer exist in config. +func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { + // First we walk the configuration tree to build the overall structure + // and capture the explicit/implicit/inherited provider dependencies. + deps := configTreeConfigDependencies(root, nil) + + // Next we walk over the resources in the state to catch any additional + // dependencies created by existing resources that are no longer in config. + // Most things we find in state will already be present in 'deps', but + // we're interested in the rare thing that isn't. + configTreeMergeStateDependencies(deps, state) + + return deps +} + +func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { + if root == nil { + // If no config is provided, we'll make a synthetic root. + // This isn't necessarily correct if we're called with a nil that + // *isn't* at the root, but in practice that can never happen. + return &moduledeps.Module{ + Name: "root", + Providers: make(moduledeps.Providers), + } + } + + name := "root" + if len(root.Path) != 0 { + name = root.Path[len(root.Path)-1] + } + + ret := &moduledeps.Module{ + Name: name, + } + + module := root.Module + + // Provider dependencies + { + providers := make(moduledeps.Providers) + + // The main way to declare a provider dependency is explicitly inside + // the "terraform" block, which allows declaring a requirement without + // also creating a configuration. + for fullName, constraints := range module.ProviderRequirements { + inst := moduledeps.ProviderInstance(fullName) + + // The handling here is a bit fiddly because the moduledeps package + // was designed around the legacy (pre-0.12) configuration model + // and hasn't yet been revised to handle the new model. As a result, + // we need to do some translation here. 
+ // FIXME: Eventually we should adjust the underlying model so we + // can also retain the source location of each constraint, for + // more informative output from the "terraform providers" command. + var rawConstraints version.Constraints + for _, constraint := range constraints { + rawConstraints = append(rawConstraints, constraint.Required...) + } + discoConstraints := discovery.NewConstraints(rawConstraints) + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + + // Provider configurations can also include version constraints, + // allowing for more terse declaration in situations where both a + // configuration and a constraint are defined in the same module. + for fullName, pCfg := range module.ProviderConfigs { + inst := moduledeps.ProviderInstance(fullName) + discoConstraints := discovery.AllVersions + if pCfg.Version.Required != nil { + discoConstraints = discovery.NewConstraints(pCfg.Version.Required) + } + if existing, exists := providers[inst]; exists { + existing.Constraints = existing.Constraints.Append(discoConstraints) + } else { + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. + for _, rc := range module.ManagedResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.StringCompact()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + for _, rc := range module.DataResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.String()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + + ret.Providers = providers + } + + childInherit := make(map[string]*configs.Provider) + for k, v := range inheritProviders { + childInherit[k] = v + } + for k, v := range module.ProviderConfigs { + childInherit[k] = v + } + for _, c := range root.Children { + ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) + } + + return ret +} + +func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { + if state == nil { + return + } + + findModule := func(path addrs.ModuleInstance) *moduledeps.Module { + module := root + for _, step := range path { + var next *moduledeps.Module + for _, cm := range module.Children { + if cm.Name == step.Name { + next = cm + break + } + } + + if next == nil { + // If we didn't find a next node, we'll need to make one + next = &moduledeps.Module{ + Name: step.Name, + Providers: make(moduledeps.Providers), + } + module.Children = append(module.Children, next) + } + + module = next + } + return module + 
} + + for _, ms := range state.Modules { + module := findModule(ms.Addr) + + for _, rs := range ms.Resources { + inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) + if _, exists := module.Providers[inst]; !exists { + module.Providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: moduledeps.ProviderDependencyFromState, + } + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go new file mode 100644 index 000000000..acd8262b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_count_boundary.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// NodeCountBoundary fixes up any transitions between "each modes" in objects +// saved in state, such as switching from NoEach to EachInt. +type NodeCountBoundary struct { + Config *configs.Config +} + +func (n *NodeCountBoundary) Name() string { + return "meta.count-boundary (EachMode fixup)" +} + +// GraphNodeEvalable +func (n *NodeCountBoundary) EvalTree() EvalNode { + return &EvalCountFixZeroOneBoundaryGlobal{ + Config: n.Config, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go new file mode 100644 index 000000000..56a33bce2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_destroy.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": +// it is ready to be destroyed. +type NodeDestroyableDataResourceInstance struct { + *NodeAbstractResourceInstance +} + +// GraphNodeEvalable +func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var providerSchema *ProviderSchema + // We don't need the provider, but we're calling EvalGetProvider to load the + // schema. + var provider providers.Interface + + // Just destroy it. + var state *states.ResourceInstanceObject + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go new file mode 100644 index 000000000..56283c0ac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_data_refresh.go @@ -0,0 +1,229 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// NodeRefreshableDataResource represents a resource that is "refreshable". 
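+//
+// During the refresh walk it dynamically expands (see DynamicExpand below)
+// into one NodeRefreshableDataResourceInstance per count/for_each instance,
+// plus destroy nodes for any instances orphaned by a reduced count.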
+type NodeRefreshableDataResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) + _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) +) + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + if !countKnown { + // If the count isn't known yet, we'll skip refreshing and try expansion + // again during the plan walk. + return nil, nil + } + + forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx) + diags = diags.Append(forEachDiags) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + if !forEachKnown { + // If the for_each isn't known yet, we'll skip refreshing and try expansion + // again during the plan walk. + return nil, nil + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeRefreshableDataResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. + concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and provider since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. 
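+		// (For example, an instance such as data.foo.bar[3] left in state
+		// after count was reduced below 4 would be removed here.)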
+ &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableDataResource", + } + + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} + +// NodeRefreshableDataResourceInstance represents a single resource instance +// that is refreshable. +type NodeRefreshableDataResourceInstance struct { + *NodeAbstractResourceInstance +} + +// GraphNodeEvalable +func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // These variables are the state for the eval sequence below, and are + // updated through pointers. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Always destroy the existing state first, since we must + // make sure that values from a previous read will not + // get interpolated if we end up needing to defer our + // loading until apply time. + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, // a pointer to nil, here + ProviderSchema: &providerSchema, + }, + + // EvalReadData will _attempt_ to read the data source, but may + // generate an incomplete planned object if the configuration + // includes values that won't be known until apply. + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + OutputChange: &change, + OutputConfigValue: &configVal, + OutputState: &state, + // If the config explicitly has a depends_on for this data + // source, assume the intention is to prevent refreshing ahead + // of that dependency, and therefore we need to deal with this + // resource during the apply phase. We do that by forcing this + // read to result in a plan. + ForcePlanRead: len(n.Config.DependsOn) > 0, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return (*state).Status != states.ObjectPlanned, nil + }, + Then: &EvalSequence{ + Nodes: []EvalNode{ + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + &EvalUpdateStateHook{}, + }, + }, + Else: &EvalSequence{ + // We can't deal with this yet, so we'll repeat this step + // during the plan walk to produce a planned change to read + // this during the apply walk. However, we do still need to + // save the generated change and partial state so that + // results from it can be included in other data resources + // or provider configurations during the refresh walk. + // (The planned object we save in the state here will be + // pruned out at the end of the refresh walk, returning + // it back to being unset again for subsequent walks.) 
+ Nodes: []EvalNode{ + &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + }, + }, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go new file mode 100644 index 000000000..38681d83d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_local.go @@ -0,0 +1,70 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state. +type NodeLocal struct { + Addr addrs.AbsLocalValue + Config *configs.Local +} + +var ( + _ GraphNodeSubPath = (*NodeLocal)(nil) + _ RemovableIfNotTargeted = (*NodeLocal)(nil) + _ GraphNodeReferenceable = (*NodeLocal)(nil) + _ GraphNodeReferencer = (*NodeLocal)(nil) + _ GraphNodeEvalable = (*NodeLocal)(nil) + _ dag.GraphNodeDotter = (*NodeLocal)(nil) +) + +func (n *NodeLocal) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.LocalValue} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(n.Config.Expr) + return appendResourceDestroyReferences(refs) +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalLocal{ + Addr: n.Addr.LocalValue, + Expr: n.Config.Expr, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go new file mode 100644 index 000000000..6e3cb41dc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_removed.go @@ -0,0 +1,89 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. 
+type NodeModuleRemoved struct { + Addr addrs.ModuleInstance +} + +var ( + _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) + _ RemovableIfNotTargeted = (*NodeModuleRemoved)(nil) + _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) + _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) + _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) +) + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", n.Addr.String()) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { + return n.Addr +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalCheckModuleRemoved{ + Addr: n.Addr, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + // Our "References" implementation indicates that this node depends on + // the call to the module it represents, which implicitly depends on + // everything inside the module. That reference must therefore be + // interpreted in terms of our parent module. + return n.Addr, n.Addr.Parent() +} + +func (n *NodeModuleRemoved) References() []*addrs.Reference { + // We depend on the call to the module we represent, because that + // implicitly then depends on everything inside that module. + // Our ReferenceOutside implementation causes this to be interpreted + // within the parent module. + + _, call := n.Addr.CallInstance() + return []*addrs.Reference{ + { + Subject: call, + + // No source range here, because there's nothing reasonable for + // us to return. + }, + } +} + +// RemovableIfNotTargeted +func (n *NodeModuleRemoved) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// EvalCheckModuleRemoved is an EvalNode implementation that verifies that +// a module has been removed from the state as expected. +type EvalCheckModuleRemoved struct { + Addr addrs.ModuleInstance +} + +func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { + mod := ctx.State().Module(n.Addr) + if mod != nil { + // If we get here then that indicates a bug either in the states + // module or in an earlier step of the graph walk, since we should've + // pruned out the module when the last resource was removed from it. + return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go new file mode 100644 index 000000000..76311a56d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_module_variable.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/zclconf/go-cty/cty" +) + +// NodeApplyableModuleVariable represents a module variable input during +// the apply step. 
+type NodeApplyableModuleVariable struct { + Addr addrs.AbsInputVariableInstance + Config *configs.Variable // Config is the var in the config + Expr hcl.Expression // Expr is the value expression given in the call +} + +// Ensure that we are implementing all of the interfaces we think we are +// implementing. +var ( + _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + +func (n *NodeApplyableModuleVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { + // We execute in the parent scope (above our own module) because + // expressions in our value are resolved in that context. + return n.Addr.Module.Parent() +} + +// RemovableIfNotTargeted +func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + + // Module input variables have their value expressions defined in the + // context of their calling (parent) module, and so references from + // a node of this type should be resolved in the parent module instance. + referencePath = n.Addr.Module.Parent() + + // Input variables are _referenced_ from their own module, though. + selfPath = n.Addr.Module + + return // uses named return values +} + +// GraphNodeReferenceable +func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Variable} +} + +// GraphNodeReferencer +func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { + + // If we have no value expression, we cannot depend on anything. + if n.Expr == nil { + return nil + } + + // Variables in the root don't depend on anything, because their values + // are gathered prior to the graph walk and recorded in the context. + if len(n.Addr.Module) == 0 { + return nil + } + + // Otherwise, we depend on anything referenced by our value expression. + // We ignore diagnostics here under the assumption that we'll re-eval + // all these things later and catch them then; for our purposes here, + // we only care about valid references. + // + // Due to our GraphNodeReferenceOutside implementation, the addresses + // returned by this function are interpreted in the _parent_ module from + // where our associated variable was declared, which is correct because + // our value expression is assigned within a "module" block in the parent + // module. + refs, _ := lang.ReferencesInExpr(n.Expr) + return refs +} + +// GraphNodeEvalable +func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { + // If we have no value, do nothing + if n.Expr == nil { + return &EvalNoop{} + } + + // Otherwise, interpolate the value of this variable and set it + // within the variables mapping. 
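// Illustrative note, not from the vendored source: the two eval steps built
// below cooperate as follows. EvalModuleCallArgument evaluates Expr in the
// parent scope and stores the result in vals; EvalSetModuleCallArguments then
// records vals against the module call so that var.<name> inside the child
// module resolves to it. Roughly (names assumed for illustration):
//
//	# parent module
//	module "child" {
//	  source = "./child"
//	  size   = var.default_size   # this Expr is evaluated here, in the parent
//	}
//
//	# ./child declares variable "size"; references to var.size inside it
//	# read the value recorded by EvalSetModuleCallArguments.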
+ vals := make(map[string]cty.Value) + + _, call := n.Addr.Module.CallInstance() + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalModuleCallArgument{ + Addr: n.Addr.Variable, + Config: n.Config, + Expr: n.Expr, + Values: vals, + + IgnoreDiagnostics: false, + }, + }, + + &EvalSetModuleCallArguments{ + Module: call, + Values: vals, + }, + }, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go new file mode 100644 index 000000000..753057123 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output.go @@ -0,0 +1,200 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeApplyableOutput represents an output that is "applyable": +// it is ready to be applied. +type NodeApplyableOutput struct { + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config +} + +var ( + _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) + _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) + _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) +) + +func (n *NodeApplyableOutput) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeTargetDownstream +func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + // If any of the direct dependencies of an output are targeted then + // the output must always be targeted as well, so its value will always + // be up-to-date at the completion of an apply walk. + return true +} + +func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { + + // Output values have their expressions resolved in the context of the + // module where they are defined. + referencePath = addr.Module + + // ...but they are referenced in the context of their calling module. + selfPath = addr.Module.Parent() + + return // uses named return values + +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { + // An output in the root module can't be referenced at all. 
+ if addr.Module.IsRoot() { + return nil + } + + // Otherwise, we can be referenced via a reference to our output name + // on the parent module's call, or via a reference to the entire call. + // e.g. module.foo.bar or just module.foo . + // Note that our ReferenceOutside method causes these addresses to be + // relative to the calling module, not the module where the output + // was declared. + _, outp := addr.ModuleCallOutput() + _, call := addr.Module.CallInstance() + return []addrs.Referenceable{outp, call} + +} + +// GraphNodeReferenceable +func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) +} + +func referencesForOutput(c *configs.Output) []*addrs.Reference { + impRefs, _ := lang.ReferencesInExpr(c.Expr) + expRefs, _ := lang.References(c.DependsOn) + l := len(impRefs) + len(expRefs) + if l == 0 { + return nil + } + refs := make([]*addrs.Reference, 0, l) + refs = append(refs, impRefs...) + refs = append(refs, expRefs...) + return refs + +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []*addrs.Reference { + return appendResourceDestroyReferences(referencesForOutput(n.Config)) +} + +// GraphNodeEvalable +func (n *NodeApplyableOutput) EvalTree() EvalNode { + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, + Node: &EvalWriteOutput{ + Addr: n.Addr.OutputValue, + Sensitive: n.Config.Sensitive, + Expr: n.Config.Expr, + }, + }, + }, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} + +// NodeDestroyableOutput represents an output that is "destroybale": +// its application will remove the output from the state. +type NodeDestroyableOutput struct { + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config +} + +var ( + _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil) + _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil) + _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil) +) + +func (n *NodeDestroyableOutput) Name() string { + return fmt.Sprintf("%s (destroy)", n.Addr.String()) +} + +// GraphNodeSubPath +func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// This will keep the destroy node in the graph if its corresponding output +// node is also in the destroy graph. +func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + return true +} + +// GraphNodeReferencer +func (n *NodeDestroyableOutput) References() []*addrs.Reference { + return referencesForOutput(n.Config) +} + +// GraphNodeEvalable +func (n *NodeDestroyableOutput) EvalTree() EvalNode { + return &EvalDeleteOutput{ + Addr: n.Addr.OutputValue, + } +} + +// dag.GraphNodeDotter impl. 
+func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go new file mode 100644 index 000000000..a76d1742c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_output_orphan.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeOutputOrphan represents an output that is an orphan. +type NodeOutputOrphan struct { + Addr addrs.AbsOutputValue +} + +var ( + _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) + _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) +) + +func (n *NodeOutputOrphan) Name() string { + return fmt.Sprintf("%s (orphan)", n.Addr.String()) +} + +// GraphNodeReferenceOutside implementation +func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +// GraphNodeReferenceable +func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) +} + +// GraphNodeSubPath +func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable +func (n *NodeOutputOrphan) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteOutput{ + Addr: n.Addr.OutputValue, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go new file mode 100644 index 000000000..2071ab168 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider.go @@ -0,0 +1,11 @@ +package terraform + +// NodeApplyableProvider represents a provider during an apply. +type NodeApplyableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeApplyableProvider) EvalTree() EvalNode { + return ProviderEvalTree(n, n.ProviderConfig()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go new file mode 100644 index 000000000..afdd4741d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_abstract.go @@ -0,0 +1,96 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ConcreteProviderNodeFunc is a callback type used to convert an +// abstract provider to a concrete one of some type. +type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex + +// NodeAbstractProvider represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. 
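// Illustrative sketch, not from the vendored source: callers are expected to
// turn these abstract provider nodes into operation-specific ones by passing
// a ConcreteProviderNodeFunc to the relevant graph transformer, along the
// lines of:
//
//	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
//		// Decorate the abstract node with apply-time behaviour.
//		return &NodeApplyableProvider{NodeAbstractProvider: a}
//	}
//
// The same callback pattern is used for resources (ConcreteResourceNodeFunc)
// and resource instances (ConcreteResourceInstanceNodeFunc) further below.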
+type NodeAbstractProvider struct { + Addr addrs.AbsProviderConfig + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *configs.Provider + Schema *configschema.Block +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) + _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) + _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) + _ GraphNodeProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) +) + +func (n *NodeAbstractProvider) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferencer +func (n *NodeAbstractProvider) References() []*addrs.Reference { + if n.Config == nil || n.Schema == nil { + return nil + } + + return ReferencesFromConfig(n.Config.Config, n.Schema) +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.Addr +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { + if n.Config == nil { + return nil + } + + return n.Config +} + +// GraphNodeAttachProvider +func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { + n.Config = c +} + +// GraphNodeAttachProviderConfigSchema impl. +func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { + n.Schema = schema +} + +// GraphNodeDotter impl. +func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go new file mode 100644 index 000000000..51335654b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_disabled.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// NodeDisabledProvider represents a provider that is disabled. A disabled +// provider does nothing. It exists to properly set inheritance information +// for child providers. 
+type NodeDisabledProvider struct { + *NodeAbstractProvider +} + +var ( + _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) + _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) + _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) + _ GraphNodeProvider = (*NodeDisabledProvider)(nil) + _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) + _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) +) + +func (n *NodeDisabledProvider) Name() string { + return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go new file mode 100644 index 000000000..580e60cb7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provider_eval.go @@ -0,0 +1,20 @@ +package terraform + +// NodeEvalableProvider represents a provider during an "eval" walk. +// This special provider node type just initializes a provider and +// fetches its schema, without configuring it or otherwise interacting +// with it. +type NodeEvalableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeEvalableProvider) EvalTree() EvalNode { + addr := n.Addr + relAddr := addr.ProviderConfig + + return &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go new file mode 100644 index 000000000..573f030d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_provisioner.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// NodeProvisioner represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. +type NodeProvisioner struct { + NameValue string + PathValue addrs.ModuleInstance +} + +var ( + _ GraphNodeSubPath = (*NodeProvisioner)(nil) + _ GraphNodeProvisioner = (*NodeProvisioner)(nil) + _ GraphNodeEvalable = (*NodeProvisioner)(nil) +) + +func (n *NodeProvisioner) Name() string { + result := fmt.Sprintf("provisioner.%s", n.NameValue) + if len(n.PathValue) > 0 { + result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeProvisioner) Path() addrs.ModuleInstance { + return n.PathValue +} + +// GraphNodeProvisioner +func (n *NodeProvisioner) ProvisionerName() string { + return n.NameValue +} + +// GraphNodeEvalable impl. 
+func (n *NodeProvisioner) EvalTree() EvalNode { + return &EvalInitProvisioner{Name: n.NameValue} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go new file mode 100644 index 000000000..c7b0e3c8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_abstract.go @@ -0,0 +1,446 @@ +package terraform + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ConcreteResourceNodeFunc is a callback type used to convert an +// abstract resource to a concrete one of some type. +type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex + +// GraphNodeResource is implemented by any nodes that represent a resource. +// The type of operation cannot be assumed, only that this node represents +// the given resource. +type GraphNodeResource interface { + ResourceAddr() addrs.AbsResource +} + +// ConcreteResourceInstanceNodeFunc is a callback type used to convert an +// abstract resource instance to a concrete one of some type. +type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex + +// GraphNodeResourceInstance is implemented by any nodes that represent +// a resource instance. A single resource may have multiple instances if, +// for example, the "count" or "for_each" argument is used for it in +// configuration. +type GraphNodeResourceInstance interface { + ResourceInstanceAddr() addrs.AbsResourceInstance +} + +// NodeAbstractResource represents a resource that has no associated +// operations. It registers all the interfaces for a resource that common +// across multiple operation types. +type NodeAbstractResource struct { + Addr addrs.AbsResource // Addr is the address for this resource + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. 
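// (Descriptive note, not from the vendored source: these fields are filled in
// by transformers through the GraphNodeAttachResourceConfig,
// GraphNodeAttachResourceSchema and GraphNodeAttachProvisionerSchema
// implementations further down in this file; AttachResourceSchema, for
// example, sets both Schema and SchemaVersion.)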
+ + Schema *configschema.Block // Schema for processing the configuration body + SchemaVersion uint64 // Schema version of "Schema", as decided by the provider + Config *configs.Resource // Config is the resource in the config + + ProvisionerSchemas map[string]*configschema.Block + + Targets []addrs.Targetable // Set from GraphNodeTargetable + + // The address of the provider this resource will use + ResolvedProvider addrs.AbsProviderConfig +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResource)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) + _ GraphNodeReferencer = (*NodeAbstractResource)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeResource = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) + _ GraphNodeTargetable = (*NodeAbstractResource)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) +) + +// NewNodeAbstractResource creates an abstract resource graph node for +// the given absolute resource address. +func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { + return &NodeAbstractResource{ + Addr: addr, + } +} + +// NodeAbstractResourceInstance represents a resource instance with no +// associated operations. It embeds NodeAbstractResource but additionally +// contains an instance key, used to identify one of potentially many +// instances that were created from a resource in configuration, e.g. using +// the "count" or "for_each" arguments. +type NodeAbstractResourceInstance struct { + NodeAbstractResource + InstanceKey addrs.InstanceKey + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + ResourceState *states.Resource +} + +var ( + _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) +) + +// NewNodeAbstractResourceInstance creates an abstract resource instance graph +// node for the given absolute resource instance address. +func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { + // Due to the fact that we embed NodeAbstractResource, the given address + // actually ends up split between the resource address in the embedded + // object and the InstanceKey field in our own struct. The + // ResourceInstanceAddr method will stick these back together again on + // request. 
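// Worked example, not from the vendored source, using an assumed address
// aws_instance.foo[0]:
//
//	n := NewNodeAbstractResourceInstance(addr)
//	n.Addr                   // aws_instance.foo   (addr.ContainingResource())
//	n.InstanceKey            // the [0] key        (addr.Resource.Key)
//	n.ResourceInstanceAddr() // aws_instance.foo[0], reassembled from the two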
+ return &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + Addr: addr.ContainingResource(), + }, + InstanceKey: addr.Resource.Key, + } +} + +func (n *NodeAbstractResource) Name() string { + return n.ResourceAddr().String() +} + +func (n *NodeAbstractResourceInstance) Name() string { + return n.ResourceInstanceAddr().String() +} + +// GraphNodeSubPath +func (n *NodeAbstractResource) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeReferenceable +func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Resource} +} + +// GraphNodeReferenceable +func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + addr := n.ResourceInstanceAddr() + return []addrs.Referenceable{ + addr.Resource, + + // A resource instance can also be referenced by the address of its + // containing resource, so that e.g. a reference to aws_instance.foo + // would match both aws_instance.foo[0] and aws_instance.foo[1]. + addr.ContainingResource().Resource, + } +} + +// GraphNodeReferencer +func (n *NodeAbstractResource) References() []*addrs.Reference { + // If we have a config then we prefer to use that. + if c := n.Config; c != nil { + var result []*addrs.Reference + + for _, traversal := range c.DependsOn { + ref, err := addrs.ParseRef(traversal) + if err != nil { + // We ignore this here, because this isn't a suitable place to return + // errors. This situation should be caught and rejected during + // validation. + log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err) + continue + } + + result = append(result, ref) + } + + if n.Schema == nil { + // Should never happens, but we'll log if it does so that we can + // see this easily when debugging. + log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) + } + + refs, _ := lang.ReferencesInExpr(c.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(c.ForEach) + result = append(result, refs...) + refs, _ = lang.ReferencesInBlock(c.Config, n.Schema) + result = append(result, refs...) + if c.Managed != nil { + for _, p := range c.Managed.Provisioners { + if p.When != configs.ProvisionerWhenCreate { + continue + } + if p.Connection != nil { + refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema) + result = append(result, refs...) + } + + schema := n.ProvisionerSchemas[p.Type] + if schema == nil { + log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name()) + } + refs, _ = lang.ReferencesInBlock(p.Config, schema) + result = append(result, refs...) + } + } + return result + } + + // Otherwise, we have no references. + return nil +} + +// GraphNodeReferencer +func (n *NodeAbstractResourceInstance) References() []*addrs.Reference { + // If we have a configuration attached then we'll delegate to our + // embedded abstract resource, which knows how to extract dependencies + // from configuration. + if n.Config != nil { + if n.Schema == nil { + // We'll produce a log message about this out here so that + // we can include the full instance address, since the equivalent + // message in NodeAbstractResource.References cannot see it. 
+ log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name()) + return nil + } + return n.NodeAbstractResource.References() + } + + // Otherwise, if we have state then we'll use the values stored in state + // as a fallback. + if rs := n.ResourceState; rs != nil { + if s := rs.Instance(n.InstanceKey); s != nil { + // State is still storing dependencies as old-style strings, so we'll + // need to do a little work here to massage this to the form we now + // want. + var result []*addrs.Reference + + // It is (apparently) possible for s.Current to be nil. This proved + // difficult to reproduce, so we will fix the symptom here and hope + // to find the root cause another time. + // + // https://github.com/hashicorp/terraform-plugin-sdk/issues/21407 + if s.Current == nil { + log.Printf("[WARN] no current state found for %s", n.Name()) + } else { + for _, addr := range s.Current.Dependencies { + if addr == nil { + // Should never happen; indicates a bug in the state loader + panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) + } + + // This is a little weird: we need to manufacture an addrs.Reference + // with a fake range here because the state isn't something we can + // make source references into. + result = append(result, &addrs.Reference{ + Subject: addr, + SourceRange: tfdiags.SourceRange{ + Filename: "(state file)", + }, + }) + } + } + return result + } + } + + // If we have neither config nor state then we have no references. + return nil +} + +// StateReferences returns the dependencies to put into the state for +// this resource. +func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { + selfAddrs := n.ReferenceableAddrs() + + // Since we don't include the source location references in our + // results from this method, we'll also filter out duplicates: + // there's no point in listing the same object twice without + // that additional context. + seen := map[string]struct{}{} + + // Pretend that we've already "seen" all of our own addresses so that we + // won't record self-references in the state. This can arise if, for + // example, a provisioner for a resource refers to the resource itself, + // which is valid (since provisioners always run after apply) but should + // not create an explicit dependency edge. + for _, selfAddr := range selfAddrs { + seen[selfAddr.String()] = struct{}{} + if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { + seen[riAddr.ContainingResource().String()] = struct{}{} + } + } + + depsRaw := n.References() + deps := make([]addrs.Referenceable, 0, len(depsRaw)) + for _, d := range depsRaw { + subj := d.Subject + if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { + // For state dependencies, we simplify outputs to just refer + // to the module as a whole. It's not really clear why we do this, + // but this logic is preserved from before the 0.12 rewrite of + // this function. + subj = mco.Call + } + + k := subj.String() + if _, exists := seen[k]; exists { + continue + } + seen[k] = struct{}{} + switch tr := subj.(type) { + case addrs.ResourceInstance: + deps = append(deps, tr) + case addrs.Resource: + deps = append(deps, tr) + case addrs.ModuleCallInstance: + deps = append(deps, tr) + default: + // No other reference types are recorded in the state. + } + } + + // We'll also sort them, since that'll avoid creating changes in the + // serialized state that make no semantic difference. 
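// Illustrative example, not from the vendored source (resource and module
// names assumed): after the loop above, module output references are reduced
// to their module call, self-references are dropped, and duplicates collapse,
// e.g.
//
//	aws_vpc.main, module.network.subnet_id, module.network, aws_instance.web[0] (self)
//	    -> aws_vpc.main, module.network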
+ sort.Slice(deps, func(i, j int) bool { + // Simple string-based sort because we just care about consistency, + // not user-friendliness. + return deps[i].String() < deps[j].String() + }) + + return deps +} + +func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { + n.ResolvedProvider = p +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false + } + + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false + } + + // If we have state, then we will use the provider from there + if n.ResourceState != nil { + // An address from the state must match exactly, since we must ensure + // we refresh/destroy a resource with the same provider configuration + // that created it. + return n.ResourceState.ProviderConfig, true + } + + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) ProvisionedBy() []string { + // If we have no configuration, then we have no provisioners + if n.Config == nil || n.Config.Managed == nil { + return nil + } + + // Build the list of provisioners we need based on the configuration. + // It is okay to have duplicates here. + result := make([]string, len(n.Config.Managed.Provisioners)) + for i, p := range n.Config.Managed.Provisioners { + result[i] = p.Type + } + + return result +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) { + if n.ProvisionerSchemas == nil { + n.ProvisionerSchemas = make(map[string]*configschema.Block) + } + n.ProvisionerSchemas[name] = schema +} + +// GraphNodeResource +func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource { + return n.Addr +} + +// GraphNodeResourceInstance +func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance { + return n.NodeAbstractResource.Addr.Instance(n.InstanceKey) +} + +// GraphNodeAddressable, TODO: remove, used by target, should unify +func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { + return NewLegacyResourceAddress(n.Addr) +} + +// GraphNodeTargetable +func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) { + n.Targets = targets +} + +// GraphNodeAttachResourceState +func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) { + n.ResourceState = s +} + +// GraphNodeAttachResourceConfig +func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) { + n.Config = c +} + +// GraphNodeAttachResourceSchema impl +func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) { + n.Schema = schema + n.SchemaVersion = version +} + +// GraphNodeDotter impl. 
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "box", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go new file mode 100644 index 000000000..68d438d7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// NodeApplyableResource represents a resource that is "applyable": +// it may need to have its record in the state adjusted to match configuration. +// +// Unlike in the plan walk, this resource node does not DynamicExpand. Instead, +// it should be inserted into the same graph as any instances of the nodes +// with dependency edges ensuring that the resource is evaluated before any +// of its instances, which will turn ensure that the whole-resource record +// in the state is suitably prepared to receive any updates to instances. +type NodeApplyableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeApplyableResource)(nil) + _ GraphNodeEvalable = (*NodeApplyableResource)(nil) + _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) + _ GraphNodeReferencer = (*NodeApplyableResource)(nil) +) + +func (n *NodeApplyableResource) Name() string { + return n.NodeAbstractResource.Name() + " (prepare state)" +} + +func (n *NodeApplyableResource) References() []*addrs.Reference { + if n.Config == nil { + log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) + return nil + } + + var result []*addrs.Reference + + // Since this node type only updates resource-level metadata, we only + // need to worry about the parts of the configuration that affect + // our "each mode": the count and for_each meta-arguments. + refs, _ := lang.ReferencesInExpr(n.Config.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(n.Config.ForEach) + result = append(result, refs...) + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + providerAddr := n.ResolvedProvider + + if config == nil { + // Nothing to do, then. 
+ log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: providerAddr, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go new file mode 100644 index 000000000..acdda45e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_apply_instance.go @@ -0,0 +1,426 @@ +package terraform + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// NodeApplyableResourceInstance represents a resource instance that is +// "applyable": it is ready to be applied and is represented by a diff. +// +// This node is for a specific instance of a resource. It will usually be +// accompanied in the graph by a NodeApplyableResource representing its +// containing resource, and should depend on that node to ensure that the +// state is properly prepared to receive changes to instances. +type NodeApplyableResourceInstance struct { + *NodeAbstractResourceInstance + + destroyNode GraphNodeDestroyerCBD + graphNodeDeposer // implementation of GraphNodeDeposer +} + +var ( + _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil) +) + +// GraphNodeAttachDestroyer +func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) { + n.destroyNode = d +} + +// createBeforeDestroy checks this nodes config status and the status af any +// companion destroy node for CreateBeforeDestroy. +func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool { + cbd := false + + if n.Config != nil && n.Config.Managed != nil { + cbd = n.Config.Managed.CreateBeforeDestroy + } + + if n.destroyNode != nil { + cbd = cbd || n.destroyNode.CreateBeforeDestroy() + } + + return cbd +} + +// GraphNodeCreator +func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeReferencer, overriding NodeAbstractResourceInstance +func (n *NodeApplyableResourceInstance) References() []*addrs.Reference { + // Start with the usual resource instance implementation + ret := n.NodeAbstractResourceInstance.References() + + // Applying a resource must also depend on the destruction of any of its + // dependencies, since this may for example affect the outcome of + // evaluating an entire list of resources with "count" set (by reducing + // the count). + // + // However, we can't do this in create_before_destroy mode because that + // would create a dependency cycle. We make a compromise here of requiring + // changes to be updated across two applies in this case, since the first + // plan will use the old values. 
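// Illustrative note, not from the vendored source (names assumed): when
// create_before_destroy is not in effect, each resource reference such as
// aws_subnet.a gains a second, destroy-phase copy with its attribute
// traversal stripped, so that applying this instance is ordered after the
// destruction of any dependency that is being replaced. The loop below
// performs that duplication.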
+ if !n.createBeforeDestroy() { + for _, ref := range ret { + switch tr := ref.Subject.(type) { + case addrs.ResourceInstance: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + case addrs.Resource: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + } + } + } + + return ret +} + +// GraphNodeEvalable +func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + if n.Config == nil { + // This should not be possible, but we've got here in at least one + // case as discussed in the following issue: + // https://github.com/hashicorp/terraform-plugin-sdk/issues/21258 + // To avoid an outright crash here, we'll instead return an explicit + // error. + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource node has no configuration attached", + fmt.Sprintf( + "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!", + addr, + ), + )) + err := diags.Err() + return &EvalReturnError{ + Error: &err, + } + } + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + + // Stop early if we don't actually have a diff + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if change == nil { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // In this particular call to EvalReadData we include our planned + // change, which signals that we expect this read to complete fully + // with no unknown values; it'll produce an error if not. + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Planned: &change, // setting this indicates that the result must be complete + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + OutputState: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // Clear the diff now that we've applied it, so + // later nodes won't see a diff that's now a no-op. 
+ &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + + &EvalUpdateStateHook{}, + }, + } +} + +func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var diff, diffApply *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var err error + var createNew bool + var createBeforeDestroyEnabled bool + var configVal cty.Value + var deposedKey states.DeposedKey + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diffApply, + }, + + // We don't want to do any destroys + // (these are handled by NodeDestroyResourceInstance instead) + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil { + return true, EvalEarlyExitError{} + } + if diffApply.Action == plans.Delete { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + destroy := false + if diffApply != nil { + destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace()) + } + if destroy && n.createBeforeDestroy() { + createBeforeDestroyEnabled = true + } + return createBeforeDestroyEnabled, nil + }, + Then: &EvalDeposeState{ + Addr: addr.Resource, + ForceKey: n.PreallocatedDeposedKey, + OutputKey: &deposedKey, + }, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + // Get the saved diff + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &diff, + }, + + // Make a new diff, in case we've learned new values in the state + // during apply which we can now incorporate. + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + PreviousDiff: &diff, + OutputChange: &diffApply, + OutputValue: &configVal, + OutputState: &state, + }, + + // Compare the diffs + &EvalCheckPlannedChange{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Planned: &diff, + Actual: &diffApply, + }, + + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &diffApply, + Destroy: false, + OutChange: &diffApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it only requires destroying, since destroying + // is handled by NodeDestroyResourceInstance. 
+ &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil || diffApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + }, + &EvalApply{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + State: &state, + Change: &diffApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, // EvalApplyProvisioners will skip if already tainted + ResourceConfig: n.Config, + CreateNew: &createNew, + Error: &err, + When: configs.ProvisionerWhenCreate, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalMaybeRestoreDeposedObject{ + Addr: addr.Resource, + Key: &deposedKey, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if !diff.Action.IsReplace() { + return true, nil + } + if !n.createBeforeDestroy() { + return true, nil + } + return false, nil + }, + Then: &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + }, + + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go new file mode 100644 index 000000000..049e5e990 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy.go @@ -0,0 +1,321 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodeDestroyResourceInstance represents a resource instance that is to be +// destroyed. +type NodeDestroyResourceInstance struct { + *NodeAbstractResourceInstance + + // If DeposedKey is set to anything other than states.NotDeposed then + // this node destroys a deposed object of the associated instance + // rather than its current object. 
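// (Descriptive note, not from the vendored source: a deposed object is
// created by EvalDeposeState during a create_before_destroy replacement, as
// in evalTreeManagedResource above. It is either restored by
// EvalMaybeRestoreDeposedObject if the replacement fails, or destroyed later
// by a node like this one with DeposedKey set to that object's key.)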
+ DeposedKey states.DeposedKey + + CreateBeforeDestroyOverride *bool +} + +var ( + _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) +) + +func (n *NodeDestroyResourceInstance) Name() string { + if n.DeposedKey != states.NotDeposed { + return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) + } + return n.ResourceInstanceAddr().String() + " (destroy)" +} + +// GraphNodeDestroyer +func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { + if n.CreateBeforeDestroyOverride != nil { + return *n.CreateBeforeDestroyOverride + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { + n.CreateBeforeDestroyOverride = &v + return nil +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() + destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) + + phaseType := addrs.ResourceInstancePhaseDestroy + if n.CreateBeforeDestroy() { + phaseType = addrs.ResourceInstancePhaseDestroyCBD + } + + for i, normalAddr := range normalAddrs { + switch ta := normalAddr.(type) { + case addrs.Resource: + destroyAddrs[i] = ta.Phase(phaseType) + case addrs.ResourceInstance: + destroyAddrs[i] = ta.Phase(phaseType) + default: + destroyAddrs[i] = normalAddr + } + } + + return destroyAddrs +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { + // If we have a config, then we need to include destroy-time dependencies + if c := n.Config; c != nil && c.Managed != nil { + var result []*addrs.Reference + + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + for _, p := range c.Managed.Provisioners { + schema := n.ProvisionerSchemas[p.Type] + + if p.When == configs.ProvisionerWhenDestroy { + if p.Connection != nil { + result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) + } + result = append(result, ReferencesFromConfig(p.Config, schema)...) 
+ } + } + + return result + } + + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Get our state + rs := n.ResourceState + var is *states.ResourceInstance + if rs != nil { + is = rs.Instance(n.InstanceKey) + } + if is == nil { + log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) + } + + var changeApply *plans.ResourceInstanceChange + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var err error + return &EvalOpFilter{ + Ops: []walkOperation{walkApply, walkDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &changeApply, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &changeApply, + Destroy: true, + OutChange: &changeApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it does not require destroying. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if changeApply == nil || changeApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalReadState{ + Addr: addr.Resource, + Output: &state, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalRequireState{ + State: &state, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &changeApply, + }, + + // Run destroy provisioners if not tainted + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if state != nil && state.Status == states.ObjectTainted { + return false, nil + } + + return true, nil + }, + + Then: &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, + ResourceConfig: n.Config, + Error: &err, + When: configs.ProvisionerWhenDestroy, + }, + }, + + // If we have a provisioning error, then we just call + // the post-apply hook now. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return err != nil, nil + }, + + Then: &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + }, + + // Make sure we handle data sources properly. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil + }, + + Then: &EvalReadDataApply{ + Addr: addr.Resource, + Config: n.Config, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + }, + Else: &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &changeApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + }, + } +} + +// NodeDestroyResourceInstance represents a resource that is to be destroyed. +// +// Destroying a resource is a state-only operation: it is the individual +// instances being destroyed that affects remote objects. 
During graph +// construction, NodeDestroyResource should always depend on any other node +// related to the given resource, since it's just a final cleanup to avoid +// leaving skeleton resource objects in state after their instances have +// all been destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeDestroyResource)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) + _ GraphNodeReferencer = (*NodeDestroyResource)(nil) + _ GraphNodeEvalable = (*NodeDestroyResource)(nil) +) + +func (n *NodeDestroyResource) Name() string { + return n.ResourceAddr().String() + " (clean up state)" +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []*addrs.Reference { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // This EvalNode will produce an error if the resource isn't already + // empty by the time it is called, since it should just be pruning the + // leftover husk of a resource in state after all of the child instances + // and their objects were destroyed. + return &EvalForgetResourceState{ + Addr: n.ResourceAddr().Resource, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go new file mode 100644 index 000000000..269c79808 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_destroy_deposed.go @@ -0,0 +1,313 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert +// an abstract resource instance to a concrete one of some type that has +// an associated deposed object key. +type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex + +type GraphNodeDeposedResourceInstanceObject interface { + DeposedInstanceObjectKey() states.DeposedKey +} + +// NodePlanDeposedResourceInstanceObject represents deposed resource +// instance objects during plan. These are distinct from the primary object +// for each resource instance since the only valid operation to do with them +// is to destroy them. +// +// This node type is also used during the refresh walk to ensure that the +// record of a deposed object is up-to-date before we plan to destroy it. 
+type NodePlanDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) +) + +func (n *NodePlanDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) +} + +func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeEvalable impl. +func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} + + // During the refresh walk we will ensure that our record of the deposed + // object is up-to-date. If it was already deleted outside of Terraform + // then this will remove it from state and thus avoid us planning a + // destroy for it during the subsequent plan walk. + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Key: n.DeposedKey, + Output: &state, + }, + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + }, + }) + + // During the plan walk we always produce a planned destroy change, because + // destroying is the only supported action for deposed objects. 
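The refresh-only work above and the plan-walk work below are each wrapped in EvalOpFilter, so the same EvalTree contributes different steps depending on which walk is running. A minimal standalone sketch of that "run this step only for these walk operations" idea, using hypothetical names instead of the SDK's internal types:

package main

import "fmt"

// walkOp is a stand-in for the SDK's walkOperation values.
type walkOp int

const (
	walkRefreshOp walkOp = iota
	walkPlanOp
	walkApplyOp
)

// opFilter runs fn only when the current operation is in ops, loosely
// mirroring how EvalOpFilter gates its wrapped node.
type opFilter struct {
	ops []walkOp
	fn  func()
}

func (f opFilter) run(current walkOp) {
	for _, op := range f.ops {
		if op == current {
			f.fn()
			return
		}
	}
}

func main() {
	refreshOnly := opFilter{ops: []walkOp{walkRefreshOp}, fn: func() { fmt.Println("refresh deposed record") }}
	planOnly := opFilter{ops: []walkOp{walkPlanOp}, fn: func() { fmt.Println("plan destroy of deposed object") }}

	for _, current := range []walkOp{walkRefreshOp, walkPlanOp, walkApplyOp} {
		refreshOnly.run(current)
		planOnly.run(current)
	}
}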
+ var change *plans.ResourceInstanceChange + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkPlan, walkPlanDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + DeposedKey: n.DeposedKey, + State: &state, + Output: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + DeposedKey: n.DeposedKey, + ProviderSchema: &providerSchema, + Change: &change, + }, + // Since deposed objects cannot be referenced by expressions + // elsewhere, we don't need to also record the planned new + // state in this case. + }, + }, + }) + + return seq +} + +// NodeDestroyDeposedResourceInstanceObject represents deposed resource +// instance objects during apply. Nodes of this type are inserted by +// DiffTransformer when the planned changeset contains "delete" changes for +// deposed instance objects, and its only supported operation is to destroy +// and then forget the associated object. +type NodeDestroyDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) +) + +func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) +} + +func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeDestroyer +func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { + // A deposed instance is always CreateBeforeDestroy by definition, since + // we use deposed only to handle create-before-destroy. 
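Deposing is essentially a bookkeeping move during create-before-destroy replacement: the still-live old object is parked under a deposed key while the replacement takes the "current" slot, and it is only restored if the create fails (compare EvalDeposeState and EvalMaybeRestoreDeposedObject earlier in this diff). A toy in-memory model of that bookkeeping, under those assumptions and with made-up types:

package main

import "fmt"

// instanceRecord is a toy model of one resource instance's objects in state:
// at most one current object plus any number of deposed ones.
type instanceRecord struct {
	current string            // "" means no current object
	deposed map[string]string // deposed key -> object id
}

// depose parks the current object under a new deposed key (EvalDeposeState's role).
func (r *instanceRecord) depose(key string) {
	if r.current == "" {
		return
	}
	if r.deposed == nil {
		r.deposed = map[string]string{}
	}
	r.deposed[key] = r.current
	r.current = ""
}

// maybeRestore puts a deposed object back into the current slot if creating the
// replacement failed and nothing else claimed that slot
// (EvalMaybeRestoreDeposedObject's role in this sketch).
func (r *instanceRecord) maybeRestore(key string) {
	if obj, ok := r.deposed[key]; ok && r.current == "" {
		r.current = obj
		delete(r.deposed, key)
	}
}

func main() {
	r := &instanceRecord{current: "i-old"}
	r.depose("00000001")
	// pretend the create of the replacement failed here, so current stays empty
	r.maybeRestore("00000001")
	fmt.Println(r.current, r.deposed) // i-old map[]
}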
+ return true +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { + if !v { + // Should never happen: deposed instances are _always_ create_before_destroy. + return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") + } + return nil +} + +// GraphNodeEvalable impl. +func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var change *plans.ResourceInstanceChange + var err error + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &change, + }, + &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &change, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + // Always write the resource back to the state deposed... if it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalReturnError{ + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} + +// GraphNodeDeposer is an optional interface implemented by graph nodes that +// might create a single new deposed object for a specific associated resource +// instance, allowing a caller to optionally pre-allocate a DeposedKey for +// it. +type GraphNodeDeposer interface { + // SetPreallocatedDeposedKey will be called during graph construction + // if a particular node must use a pre-allocated deposed key if/when it + // "deposes" the current object of its associated resource instance. + SetPreallocatedDeposedKey(key states.DeposedKey) +} + +// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. +// Embed it in a node type to get automatic support for it, and then access +// the field PreallocatedDeposedKey to access any pre-allocated key. +type graphNodeDeposer struct { + PreallocatedDeposedKey states.DeposedKey +} + +func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { + n.PreallocatedDeposedKey = key +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go new file mode 100644 index 000000000..2dc0df908 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan.go @@ -0,0 +1,166 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// NodePlannableResource represents a resource that is "plannable": +// it is ready to be planned in order to create a diff. 
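The graphNodeDeposer helper defined a little above illustrates a pattern this package uses for optional interfaces: embed a small struct in a node type to pick up both the field and the method that satisfy the interface. A tiny self-contained illustration of that embedding pattern, with hypothetical names:

package main

import "fmt"

// keySetter stands in for an optional interface like GraphNodeDeposer.
type keySetter interface {
	SetPreallocatedKey(key string)
}

// keyHolder is the embeddable helper, analogous to graphNodeDeposer: embedding
// it gives a node the field and method needed to satisfy keySetter.
type keyHolder struct {
	PreallocatedKey string
}

func (h *keyHolder) SetPreallocatedKey(key string) { h.PreallocatedKey = key }

// applyNode picks up keySetter support simply by embedding keyHolder.
type applyNode struct {
	keyHolder
	name string
}

func main() {
	var n keySetter = &applyNode{name: "example"}
	n.SetPreallocatedKey("00000001")
	fmt.Println(n.(*applyNode).PreallocatedKey) // 00000001
}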
+type NodePlannableResource struct { + *NodeAbstractResource + + // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD + // during graph construction, if dependencies require us to force this + // on regardless of what the configuration says. + ForceCreateBeforeDestroy *bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResource)(nil) + _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) + _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) + _ GraphNodeReferenceable = (*NodePlannableResource)(nil) + _ GraphNodeReferencer = (*NodePlannableResource)(nil) + _ GraphNodeResource = (*NodePlannableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + // this ensures we can reference the resource even if the count is 0 + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: n.ResolvedProvider, + } +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) CreateBeforeDestroy() bool { + if n.ForceCreateBeforeDestroy != nil { + return *n.ForceCreateBeforeDestroy + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { + n.ForceCreateBeforeDestroy = &v + return nil +} + +// GraphNodeDynamicExpandable +func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas + + return &NodePlannableResourceInstance{ + NodeAbstractResourceInstance: a, + + // By the time we're walking, we've figured out whether we need + // to force on CreateBeforeDestroy due to dependencies on other + // nodes that have it. 
+ ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), + } + } + + // The concrete resource factory we'll use for orphans + concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas + + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count or for_each (if present) + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + }, + + // Add the count/for_each orphans + &OrphanResourceCountTransformer{ + Concrete: concreteResourceOrphan, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodePlannableResource", + } + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go new file mode 100644 index 000000000..2c3a7012b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_destroy.go @@ -0,0 +1,88 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodePlanDestroyableResourceInstance represents a resource that is ready +// to be planned for destruction. +type NodePlanDestroyableResourceInstance struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) +) + +// GraphNodeDestroyer +func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeEvalable +func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. These are written to by address in the EvalNodes we + // declare below. 
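As the comment just above says, these EvalTrees thread intermediate results between nodes by handing each node pointers to shared local variables rather than returning values. A minimal standalone sketch of that by-address wiring, with invented step types rather than the SDK's eval nodes:

package main

import "fmt"

// readStep writes its result through Output, the way EvalReadState fills *state.
type readStep struct {
	Output *string
}

func (s readStep) run() { *s.Output = "object-from-state" }

// diffStep reads the earlier result through State and writes through OutputChange,
// mirroring how a later node consumes &state and produces &change.
type diffStep struct {
	State        *string
	OutputChange *string
}

func (s diffStep) run() { *s.OutputChange = "destroy " + *s.State }

func main() {
	// Declare the shared variables up front, then hand their addresses to each step.
	var state, change string
	steps := []interface{ run() }{
		readStep{Output: &state},
		diffStep{State: &state, OutputChange: &change},
	}
	for _, s := range steps {
		s.run()
	}
	fmt.Println(change) // destroy object-from-state
}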
+ var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + if n.ResolvedProvider.ProviderConfig.Type == "" { + // Should never happen; indicates that the graph was not constructed + // correctly since we didn't get our provider attached. + panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + &EvalCheckPreventDestroy{ + Addr: addr.Resource, + Config: n.Config, + Change: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go new file mode 100644 index 000000000..ac4b24cf2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_instance.go @@ -0,0 +1,201 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/zclconf/go-cty/cty" +) + +// NodePlannableResourceInstance represents a _single_ resource +// instance that is plannable. This means this represents a single +// count index, for example. 
+type NodePlannableResourceInstance struct { + *NodeAbstractResourceInstance + ForceCreateBeforeDestroy bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + // If we already have a non-planned state then we already dealt + // with this during the refresh walk and so we have nothing to do + // here. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + depChanges := false + + // Check and see if any of our dependencies have changes. + changes := ctx.Changes() + for _, d := range n.StateReferences() { + ri, ok := d.(addrs.ResourceInstance) + if !ok { + continue + } + change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen) + if change != nil && change.Action != plans.NoOp { + depChanges = true + break + } + } + + refreshed := state != nil && state.Status != states.ObjectPlanned + + // If there are no dependency changes, and it's not a forced + // read because we there was no Refresh, then we don't need + // to re-read. If any dependencies have changes, it means + // our config may also have changes and we need to Read the + // data source again. 
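The condition described in the comment below boils down to: read the data source again during plan unless it was already refreshed and none of its dependencies have pending changes. A small sketch of just that decision, with hypothetical boolean inputs standing in for the changeset and state checks:

package main

import "fmt"

// needsRead reports whether a data source must be read again during plan:
// it is skipped only when it was already refreshed and no dependency has a
// pending (non-NoOp) change.
func needsRead(depHasChanges, refreshed bool) bool {
	return depHasChanges || !refreshed
}

func main() {
	fmt.Println(needsRead(false, true))  // false: refreshed and deps unchanged, skip the read
	fmt.Println(needsRead(true, true))   // true: a dependency changed, read again
	fmt.Println(needsRead(false, false)) // true: never refreshed, forced read
}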
+ if !depChanges && refreshed { + return false, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready + OutputChange: &change, + OutputValue: &configVal, + OutputState: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} + +func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + CreateBeforeDestroy: n.ForceCreateBeforeDestroy, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + }, + &EvalCheckPreventDestroy{ + Addr: addr.Resource, + Config: n.Config, + Change: &change, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go new file mode 100644 index 000000000..8e4f7148f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_plan_orphan.go @@ -0,0 +1,84 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. 
+type NodePlannableResourceInstanceOrphan struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) + +var ( + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) + +func (n *NodePlannableResourceInstanceOrphan) Name() string { + return n.ResourceInstanceAddr().String() + " (orphan)" +} + +// GraphNodeEvalable +func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var provider providers.Interface + var providerSchema *ProviderSchema + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + Output: &change, + OutputState: &state, // Will point to a nil state after this complete, signalling destroyed + }, + &EvalCheckPreventDestroy{ + Addr: addr.Resource, + Config: n.Config, + Change: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go new file mode 100644 index 000000000..dcab37270 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_refresh.go @@ -0,0 +1,296 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// NodeRefreshableManagedResource represents a resource that is expanabled into +// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. 
+type NodeRefreshableManagedResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) +) + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx) + if forEachDiags.HasErrors() { + return nil, diags.Err() + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans to make sure these resources are accounted for + // during a scale in. + &OrphanResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + ForEach: forEachMap, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableManagedResource", + } + + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} + +// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. 
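The DynamicExpand above builds one node per configured instance and then adds "count orphans" for instances still present in state but beyond the new count, so a scale-in ends up planning their destruction. A rough standalone sketch of that orphan detection, assuming simple integer instance keys rather than the SDK's address types:

package main

import "fmt"

// expandAndFindOrphans returns the instance indices to keep for the configured
// count, plus the indices present in state that now fall outside it (orphans).
func expandAndFindOrphans(count int, stateIndices []int) (keep, orphans []int) {
	for i := 0; i < count; i++ {
		keep = append(keep, i)
	}
	for _, idx := range stateIndices {
		if idx >= count {
			orphans = append(orphans, idx) // gets a destroy-oriented node, as in a scale-in
		}
	}
	return keep, orphans
}

func main() {
	// count was 4, now 2: instances 2 and 3 become orphans.
	keep, orphans := expandAndFindOrphans(2, []int{0, 1, 2, 3})
	fmt.Println(keep, orphans) // [0 1] [2 3]
}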
+type NodeRefreshableManagedResourceInstance struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) +) + +// GraphNodeDestroyer +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeEvalable +func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // Eval info is different depending on what kind of resource this is + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + if n.ResourceState == nil { + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) + return n.evalTreeManagedResourceNoState() + } + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) + return n.evalTreeManagedResource() + + case addrs.DataResourceMode: + // Get the data source node. If we don't have a configuration + // then it is an orphan so we destroy it (remove it from the state). + var dn GraphNodeEvalable + if n.Config != nil { + dn = &NodeRefreshableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, + } + } else { + dn = &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, + } + } + + return dn.EvalTree() + default: + panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) + } +} + +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + // This happened during initial development. All known cases were + // fixed and tested but as a sanity check let's assert here. + if n.ResourceState == nil { + err := fmt.Errorf( + "No resource state attached for addr: %s\n\n"+ + "This is a bug. Please report this to Terraform with your configuration\n"+ + "and state attached. 
Please be careful to scrub any sensitive information.", + addr) + return &EvalReturnError{Error: &err} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} + +// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource +// nodes that don't have state attached. An example of where this functionality +// is useful is when a resource that already exists in state is being scaled +// out, ie: has its resource count increased. In this case, the scaled out node +// needs to be available to other nodes (namely data sources) that may depend +// on it for proper interpolation, or confusing "index out of range" errors can +// occur. +// +// The steps in this sequence are very similar to the steps carried out in +// plan, but nothing is done with the diff after it is created - it is dropped, +// and its changes are not counted in the UI. +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + Stub: true, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // We must also save the planned change, so that expressions in + // other nodes, such as provider configurations and data resources, + // can work with the planned new value. + // + // This depends on the fact that Context.Refresh creates a + // temporary new empty changeset for the duration of its graph + // walk, and so this recorded change will be discarded immediately + // after the refresh walk completes. 
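Per the comment above, the stub diff written below only needs to exist for the duration of the refresh walk, because that walk records changes into a temporary changeset that is thrown away when the walk ends. A hypothetical sketch of that scoping idea, not the SDK's actual Context implementation:

package main

import "fmt"

// changes is a toy changeset keyed by resource instance address.
type changes map[string]string

// withScratchChanges runs walk against a fresh, empty changeset and discards
// it when the walk returns, loosely mirroring how the refresh walk is
// described as using a temporary changeset.
func withScratchChanges(walk func(changes)) {
	scratch := changes{}
	walk(scratch)
	// scratch goes out of scope here; nothing recorded in it survives the walk.
}

func main() {
	persistent := changes{}

	withScratchChanges(func(c changes) {
		c["aws_instance.example[2]"] = "create (stub)" // visible to later nodes in this walk only
		fmt.Println("during refresh walk:", c)
	})

	fmt.Println("after refresh walk:", persistent) // the stub change was never kept
}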
+ &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go new file mode 100644 index 000000000..f0eb18a06 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_resource_validate.go @@ -0,0 +1,90 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" + "github.com/zclconf/go-cty/cty" +) + +// NodeValidatableResource represents a resource that is used for validation +// only. +type NodeValidatableResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeSubPath = (*NodeValidatableResource)(nil) + _ GraphNodeEvalable = (*NodeValidatableResource)(nil) + _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) + _ GraphNodeReferencer = (*NodeValidatableResource)(nil) + _ GraphNodeResource = (*NodeValidatableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + // Declare the variables will be used are used to pass values along + // the evaluation sequence below. These are written to via pointers + // passed to the EvalNodes. + var provider providers.Interface + var providerSchema *ProviderSchema + var configVal cty.Value + + seq := &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalValidateResource{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Config: config, + ConfigVal: &configVal, + }, + }, + } + + if managed := n.Config.Managed; managed != nil { + hasCount := n.Config.Count != nil + hasForEach := n.Config.ForEach != nil + + // Validate all the provisioners + for _, p := range managed.Provisioners { + var provisioner provisioners.Interface + var provisionerSchema *configschema.Block + + if p.Connection == nil { + p.Connection = config.Managed.Connection + } else if config.Managed.Connection != nil { + p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) + } + + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + Schema: &provisionerSchema, + }, + &EvalValidateProvisioner{ + ResourceAddr: addr.Resource, + Provisioner: &provisioner, + Schema: &provisionerSchema, + Config: p, + ResourceHasCount: hasCount, + ResourceHasForEach: hasForEach, + }, + ) + } + } + + return seq +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go new file mode 100644 index 000000000..844d060c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/node_root_variable.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// NodeRootVariable represents a root variable input. 
+type NodeRootVariable struct { + Addr addrs.InputVariable + Config *configs.Variable +} + +var ( + _ GraphNodeSubPath = (*NodeRootVariable)(nil) + _ GraphNodeReferenceable = (*NodeRootVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + +func (n *NodeRootVariable) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeRootVariable) Path() addrs.ModuleInstance { + return addrs.RootModuleInstance +} + +// GraphNodeReferenceable +func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// dag.GraphNodeDotter impl. +func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go new file mode 100644 index 000000000..19e3469cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/path.go @@ -0,0 +1,17 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// PathObjectCacheKey is like PathCacheKey but includes an additional name +// to be included in the key, for module-namespaced objects. +// +// The result of this function is guaranteed unique for any distinct pair +// of path and name, but is not guaranteed to be in any particular format +// and in particular should never be shown to end-users. +func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { + return fmt.Sprintf("%s|%s", path.String(), objectName) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go new file mode 100644 index 000000000..5c19f6e7c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/plan.go @@ -0,0 +1,94 @@ +package terraform + +import ( + "bytes" + "encoding/gob" + "fmt" + "io" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/zclconf/go-cty/cty" +) + +func init() { + gob.Register(make([]interface{}, 0)) + gob.Register(make([]map[string]interface{}, 0)) + gob.Register(make(map[string]interface{})) + gob.Register(make(map[string]string)) +} + +// Plan represents a single Terraform execution plan, which contains +// all the information necessary to make an infrastructure change. +// +// A plan has to contain basically the entire state of the world +// necessary to make a change: the state, diff, config, backend config, etc. +// This is so that it can run alone without any other data. +type Plan struct { + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Config represents the entire configuration that was present when this + // plan was created. + Config *configs.Config + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. + State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. + Vars map[string]cty.Value + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. 
+ // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. + Targets []string + + // TerraformVersion is the version of Terraform that was used to create + // this plan. + // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. + TerraformVersion string + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte + + // Backend is the backend that this plan should use and store data with. + Backend *BackendState + + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool +} + +func (p *Plan) String() string { + buf := new(bytes.Buffer) + buf.WriteString("DIFF:\n\n") + buf.WriteString(p.Diff.String()) + buf.WriteString("\n\nSTATE:\n\n") + buf.WriteString(p.State.String()) + return buf.String() +} + +// ReadPlan reads a plan structure out of a reader in the format that +// was written by WritePlan. +func ReadPlan(src io.Reader) (*Plan, error) { + return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") +} + +// WritePlan writes a plan somewhere in a binary format. +func WritePlan(d *Plan, dst io.Writer) error { + return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go new file mode 100644 index 000000000..7e401f33e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provider_mock.go @@ -0,0 +1,521 @@ +package terraform + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
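The MockProvider fields that follow repeat one convention per RPC method: a Called flag, a copy of the last Request, a canned Response, and an optional Fn hook that takes precedence over the canned value. A stripped-down sketch of that convention for a single hypothetical method, not tied to the providers.Interface signatures:

package main

import (
	"fmt"
	"sync"
)

type pingRequest struct{ Message string }
type pingResponse struct{ Reply string }

// mockService mirrors the MockProvider convention for one method:
// record the call, keep the request, prefer the Fn hook, else return the canned response.
type mockService struct {
	sync.Mutex

	PingCalled   bool
	PingRequest  pingRequest
	PingResponse pingResponse
	PingFn       func(pingRequest) pingResponse
}

func (m *mockService) Ping(r pingRequest) pingResponse {
	m.Lock()
	defer m.Unlock()

	m.PingCalled = true
	m.PingRequest = r
	if m.PingFn != nil {
		return m.PingFn(r)
	}
	return m.PingResponse
}

func main() {
	m := &mockService{PingResponse: pingResponse{Reply: "canned"}}
	fmt.Println(m.Ping(pingRequest{Message: "hello"}).Reply) // canned
	fmt.Println(m.PingCalled, m.PingRequest.Message)         // true hello

	m.PingFn = func(r pingRequest) pingResponse { return pingResponse{Reply: "from hook: " + r.Message} }
	fmt.Println(m.Ping(pingRequest{Message: "again"}).Reply) // from hook: again
}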
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written 
against + // the old mock API. + ValidateFn func(c *ResourceConfig) (ws []string, es []error) + ConfigureFn func(c *ResourceConfig) error + DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) + ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.PrepareProviderConfigCalled = true + p.PrepareProviderConfigRequest = r + if p.PrepareProviderConfigFn != nil { + return p.PrepareProviderConfigFn(r) + } + return p.PrepareProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceTypeConfigCalled = true + p.ValidateResourceTypeConfigRequest = r + + if p.ValidateFn != nil { + resp := p.getSchema() + schema := resp.Provider.Block + rc := NewResourceConfigShimmed(r.Config, schema) + warns, errs := p.ValidateFn(rc) + ret := providers.ValidateResourceTypeConfigResponse{} + for _, warn := range warns { + ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + ret.Diagnostics = ret.Diagnostics.Append(err) + } + } + if p.ValidateResourceTypeConfigFn != nil { + return p.ValidateResourceTypeConfigFn(r) + } + + return p.ValidateResourceTypeConfigResponse +} + +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceConfigCalled = true + p.ValidateDataSourceConfigRequest = r + + if p.ValidateDataSourceConfigFn != nil { + return p.ValidateDataSourceConfigFn(r) + } + + return p.ValidateDataSourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() + defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = 
resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + } + return resp +} + +func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureRequest = r + + if p.ConfigureFn != nil { + resp := p.getSchema() + schema := resp.Provider.Block + rc := NewResourceConfigShimmed(r.Config, schema) + ret := providers.ConfigureResponse{} + + err := p.ConfigureFn(rc) + if err != nil { + ret.Diagnostics = ret.Diagnostics.Append(err) + } + return ret + } + if p.ConfigureNewFn != nil { + return p.ConfigureNewFn(r) + } + + return p.ConfigureResponse +} + +func (p *MockProvider) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provider itself is responsible for handling + // any concurrency concerns in this case. + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadResourceCalled = true + p.ReadResourceRequest = r + + if p.ReadResourceFn != nil { + return p.ReadResourceFn(r) + } + + // make sure the NewState fits the schema + newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState) + if err != nil { + panic(err) + } + resp := p.ReadResourceResponse + resp.NewState = newState + + return resp +} + +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + p.Lock() + defer p.Unlock() + + p.PlanResourceChangeCalled = true + p.PlanResourceChangeRequest = r + + if p.DiffFn != nil { + ps := p.getSchema() + if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil { + return providers.PlanResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Printf("mock provider has no schema for resource type %s", r.TypeName)), + } + } + schema := ps.ResourceTypes[r.TypeName].Block + info := &InstanceInfo{ + Type: r.TypeName, + } + priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) + cfg := NewResourceConfigShimmed(r.Config, schema) + + legacyDiff, err := p.DiffFn(info, priorState, cfg) + + var res providers.PlanResourceChangeResponse + res.PlannedState = r.ProposedNewState + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + if legacyDiff != nil { + newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema) + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + + res.PlannedState = newVal + + var requiresNew []string + for attr, d := range legacyDiff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType()) + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + res.RequiresReplace = requiresReplace + } + return res + } + if p.PlanResourceChangeFn != nil { + return p.PlanResourceChangeFn(r) + } + + return p.PlanResourceChangeResponse +} + +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + 
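+	// The call is recorded under the lock and the lock is released again before
+	// the legacy ApplyFn shim below runs, so a user-supplied callback never
+	// executes while the mutex is held. For illustration (a hypothetical test
+	// snippet, not part of this file), the new-style hook can be used instead:
+	//
+	//	p := &MockProvider{}
+	//	p.ApplyResourceChangeFn = func(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+	//		return providers.ApplyResourceChangeResponse{NewState: r.PlannedState}
+	//	}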
p.Lock() + p.ApplyResourceChangeCalled = true + p.ApplyResourceChangeRequest = r + p.Unlock() + + if p.ApplyFn != nil { + // ApplyFn is a special callback fashioned after our old provider + // interface, which expected to be given an actual diff rather than + // separate old/new values to apply. Therefore we need to approximate + // a diff here well enough that _most_ of our legacy ApplyFns in old + // tests still see the behavior they are expecting. New tests should + // not use this, and should instead use ApplyResourceChangeFn directly. + providerSchema := p.getSchema() + schema, ok := providerSchema.ResourceTypes[r.TypeName] + if !ok { + return providers.ApplyResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)), + } + } + + info := &InstanceInfo{ + Type: r.TypeName, + } + + priorVal := r.PriorState + plannedVal := r.PlannedState + priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal) + plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal) + s := NewInstanceStateShimmedFromValue(priorVal, 0) + d := &InstanceDiff{ + Attributes: make(map[string]*ResourceAttrDiff), + } + if plannedMap == nil { // destroying, then + d.Destroy = true + // Destroy diffs don't have any attribute diffs + } else { + if priorMap == nil { // creating, then + // We'll just make an empty prior map to make things easier below. + priorMap = make(map[string]string) + } + + for k, new := range plannedMap { + old := priorMap[k] + newComputed := false + if new == hcl2shim.UnknownVariableValue { + new = "" + newComputed = true + } + d.Attributes[k] = &ResourceAttrDiff{ + Old: old, + New: new, + NewComputed: newComputed, + Type: DiffAttrInput, // not generally used in tests, so just hard-coded + } + } + // Also need any attributes that were removed in "planned" + for k, old := range priorMap { + if _, ok := plannedMap[k]; ok { + continue + } + d.Attributes[k] = &ResourceAttrDiff{ + Old: old, + NewRemoved: true, + Type: DiffAttrInput, + } + } + } + newState, err := p.ApplyFn(info, s, d) + resp := providers.ApplyResourceChangeResponse{} + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + if newState != nil { + var newVal cty.Value + if newState != nil { + var err error + newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + } else { + // If apply returned a nil new state then that's the old way to + // indicate that the object was destroyed. Our new interface calls + // for that to be signalled as a null value. + newVal = cty.NullVal(schema.Block.ImpliedType()) + } + resp.NewState = newVal + } + + return resp + } + if p.ApplyResourceChangeFn != nil { + return p.ApplyResourceChangeFn(r) + } + + return p.ApplyResourceChangeResponse +} + +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + p.Lock() + defer p.Unlock() + + if p.ImportStateReturn != nil { + for _, is := range p.ImportStateReturn { + if is.Attributes == nil { + is.Attributes = make(map[string]string) + } + is.Attributes["id"] = is.ID + + typeName := is.Ephemeral.Type + // Use the requested type if the resource has no type of it's own. + // We still return the empty type, which will error, but this prevents a panic. 
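+	// For illustration (a hypothetical test setup, not part of this file), a
+	// test driving this legacy path might populate the mock like so, together
+	// with a matching entry in GetSchemaReturn.ResourceTypes:
+	//
+	//	p.ImportStateReturn = []*InstanceState{
+	//		{ID: "i-abc123", Ephemeral: EphemeralState{Type: "aws_instance"}},
+	//	}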
+ if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go new file mode 100644 index 000000000..93b19be57 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/provisioner_mock.go @@ -0,0 +1,154 @@ +package terraform + +import ( + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written against + // the old mock API. + ApplyFn func(rs *InstanceState, c *ResourceConfig) error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { + return p.GetSchemaResponse +} + +func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProvisionerConfigCalled = true + p.ValidateProvisionerConfigRequest = r + if p.ValidateProvisionerConfigFn != nil { + return p.ValidateProvisionerConfigFn(r) + } + return p.ValidateProvisionerConfigResponse +} + +func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { + p.Lock() + defer p.Unlock() + + p.ProvisionResourceCalled = true + p.ProvisionResourceRequest = r + if p.ApplyFn != nil { + if !r.Config.IsKnown() { + panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config)) + } + + schema := p.getSchema() + rc := NewResourceConfigShimmed(r.Config, schema.Provisioner) + connVal := r.Connection + connMap := map[string]string{} + + if !connVal.IsNull() && connVal.IsKnown() { + for it := connVal.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + + if !av.IsKnown() || av.IsNull() { + continue + } + + av, _ = convert.Convert(av, cty.String) + connMap[name] = av.AsString() + } + } + + // We no longer pass the full instance state to a provisioner, so we'll + // construct a partial one that should be good enough for what existing + // test mocks need. + is := &InstanceState{ + Ephemeral: EphemeralState{ + ConnInfo: connMap, + }, + } + var resp provisioners.ProvisionResourceResponse + err := p.ApplyFn(is, rc) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + return resp + } + if p.ProvisionResourceFn != nil { + fn := p.ProvisionResourceFn + p.Unlock() + return fn(r) + } + + return p.ProvisionResourceResponse +} + +func (p *MockProvisioner) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provisioner itself is responsible for handling + // any concurrency concerns in this case. + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvisioner) Close() error { + p.Lock() + defer p.Unlock() + + p.CloseCalled = true + if p.CloseFn != nil { + return p.CloseFn() + } + + return p.CloseResponse +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go new file mode 100644 index 000000000..bd5774600 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource.go @@ -0,0 +1,510 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" +) + +// Resource is a legacy way to identify a particular resource instance. +// +// New code should use addrs.ResourceInstance instead. This is still here +// only for codepaths that haven't been updated yet. +type Resource struct { + // These are all used by the new EvalNode stuff. 
+ Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Flags ResourceFlag +} + +// NewResource constructs a legacy Resource object from an +// addrs.ResourceInstance value. +// +// This is provided to shim to old codepaths that haven't been updated away +// from this type yet. Since this old type is not able to represent instances +// that have string keys, this function will panic if given a resource address +// that has a string key. +func NewResource(addr addrs.ResourceInstance) *Resource { + ret := &Resource{ + Name: addr.Resource.Name, + Type: addr.Resource.Type, + } + + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + ret.CountIndex = int(tk) + default: + panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) + } + } + + return ret +} + +// ResourceKind specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. +type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string +} + +// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. +// +// InstanceInfo is a legacy type, and uses of it should be gradually replaced +// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as +// appropriate. +// +// The legacy InstanceInfo type cannot represent module instances with instance +// keys, so this function will panic if given such a path. Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. +func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name + } + + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." 
+ id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } + } + + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } +} + +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. + id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. + panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. 
It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." + } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. 
+ // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// CheckSet checks that the given list of configuration keys is +// properly set. If not, errors are returned for each unset key. +// +// This is useful to be called in the Validate method of a ResourceProvider. +func (c *ResourceConfig) CheckSet(keys []string) []error { + var errs []error + + for _, k := range keys { + if !c.IsSet(k) { + errs = append(errs, fmt.Errorf("%s must be set", k)) + } + } + + return errs +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. 
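+//
+// For example (an illustrative sketch, not part of this file): if the shimmed
+// object has a known "name" attribute and an unknown (computed) "arn"
+// attribute, then:
+//
+//	c.IsSet("name")      // true: a concrete value is present
+//	c.IsSet("arn")       // true: the value is set but still being computed
+//	c.IsComputed("name") // false
+//	c.IsComputed("arn")  // true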
+func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go new file mode 100644 index 000000000..8a683012d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_address.go @@ -0,0 +1,618 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. 
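+//
+// Illustrative address forms accepted by ParseResourceAddress below (the exact
+// grammar is defined by the tokenizer's regular expression; the resource names
+// here are hypothetical):
+//
+//	aws_instance.web
+//	aws_instance.web[1]
+//	module.foo.aws_instance.web
+//	data.aws_ami.ubuntu
+//	aws_instance.web.tainted[1]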
+type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + + n.Path = append(n.Path, r.Path...) + + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// HasResourceSpec returns true if the address has a resource spec, as +// defined in the documentation: +// https://www.terraform.io/docs/internals/resource-addressing.html +// In particular, this returns false if the address contains only +// a module path, thus addressing the entire module. +func (r *ResourceAddress) HasResourceSpec() bool { + return r.Type != "" && r.Name != "" +} + +// WholeModuleAddress returns the resource address that refers to all +// resources in the same module as the receiver address. +func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { + return &ResourceAddress{ + Path: r.Path, + Index: -1, + InstanceTypeSet: false, + } +} + +// MatchesResourceConfig returns true if the receiver matches the given +// configuration resource within the given _static_ module path. Note that +// the module path in a resource address is a _dynamic_ module path, and +// multiple dynamic resource paths may map to a single static path if +// count and for_each are in use on module calls. +// +// Since resource configuration blocks represent all of the instances of +// a multi-instance resource, the index of the address (if any) is not +// considered. +func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { + if r.HasResourceSpec() { + // FIXME: Some ugliness while we are between worlds. Functionality + // in "addrs" should eventually replace this ResourceAddress idea + // completely, but for now we'll need to translate to the old + // way of representing resource modes. 
+ switch r.Mode { + case ManagedResourceMode: + if rc.Mode != addrs.ManagedResourceMode { + return false + } + case DataResourceMode: + if rc.Mode != addrs.DataResourceMode { + return false + } + } + if r.Type != rc.Type || r.Name != rc.Name { + return false + } + } + + addrPath := r.Path + + // normalize + if len(addrPath) == 0 { + addrPath = nil + } + if len(path) == 0 { + path = nil + } + rawPath := []string(path) + return reflect.DeepEqual(addrPath, rawPath) +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. +func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case ManagedResourceMode: + // Done + case DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a +// resource name as described in a module diff. 
+// +// For historical reasons a different addressing format is used in this +// context. The internal format should not be shown in the UI and instead +// this function should be used to translate to a ResourceAddress and +// then, where appropriate, use the String method to produce a canonical +// resource address string for display in the UI. +// +// The given path slice must be empty (or nil) for the root module, and +// otherwise consist of a sequence of module names traversing down into +// the module tree. If a non-nil path is provided, the caller must not +// modify its underlying array after passing it to this function. +func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { + addr, err := parseResourceAddressInternal(key) + if err != nil { + return nil, err + } + addr.Path = path + return addr, nil +} + +// NewLegacyResourceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Type, + Name: addr.Resource.Name, + } + + switch addr.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + ret.Index = -1 + + return ret +} + +// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Resource.Type, + Name: addr.Resource.Resource.Name, + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. 
+ panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + + if addr.Resource.Key == addrs.NoKey { + ret.Index = -1 + } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { + ret.Index = int(ik) + } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { + ret.Index = -1 + } else { + panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) + } + + return ret +} + +// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to +// the new resource address type addrs.AbsResourceInstance. +// +// This method can be used only on an address that has a resource specification. +// It will panic if called on a module-path-only ResourceAddress. Use +// method HasResourceSpec to check before calling, in contexts where it is +// unclear. +// +// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" +// states, and so if these are present on the receiver then they are discarded. +// +// This is provided for shimming purposes so that we can easily adapt functions +// that are returning the legacy ResourceAddress type, for situations where +// the new type is required. +func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { + if !addr.HasResourceSpec() { + panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") + } + + ret := addrs.AbsResourceInstance{ + Module: addr.ModuleInstanceAddr(), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Type: addr.Type, + Name: addr.Name, + }, + }, + } + + switch addr.Mode { + case ManagedResourceMode: + ret.Resource.Resource.Mode = addrs.ManagedResourceMode + case DataResourceMode: + ret.Resource.Resource.Mode = addrs.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) + } + + if addr.Index != -1 { + ret.Resource.Key = addrs.IntKey(addr.Index) + } + + return ret +} + +// ModuleInstanceAddr returns the module path portion of the receiver as a +// addrs.ModuleInstance value. +func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { + path := make(addrs.ModuleInstance, len(addr.Path)) + for i, name := range addr.Path { + path[i] = addrs.ModuleInstanceStep{Name: name} + } + return path +} + +// Contains returns true if and only if the given node is contained within +// the receiver. +// +// Containment is defined in terms of the module and resource heirarchy: +// a resource is contained within its module and any ancestor modules, +// an indexed resource instance is contained with the unindexed resource, etc. +func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { + ourPath := addr.Path + givenPath := other.Path + if len(givenPath) < len(ourPath) { + return false + } + for i := range ourPath { + if ourPath[i] != givenPath[i] { + return false + } + } + + // If the receiver is a whole-module address then the path prefix + // matching is all we need. + if !addr.HasResourceSpec() { + return true + } + + if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { + return false + } + + if addr.Index != -1 && addr.Index != other.Index { + return false + } + + if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { + return false + } + + return true +} + +// Equals returns true if the receiver matches the given address. 
+// +// The name of this method is a misnomer, since it doesn't test for exact +// equality. Instead, it tests that the _specified_ parts of each +// address match, treating any unspecified parts as wildcards. +// +// See also Contains, which takes a more heirarchical approach to comparing +// addresses. +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || + reflect.DeepEqual(addr.Path, other.Path) + + indexMatch := addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index + + nameMatch := addr.Name == "" || + other.Name == "" || + addr.Name == other.Name + + typeMatch := addr.Type == "" || + other.Type == "" || + addr.Type == other.Type + + // mode is significant only when type is set + modeMatch := addr.Type == "" || + other.Type == "" || + addr.Mode == other.Mode + + return pathMatch && + indexMatch && + addr.InstanceType == other.InstanceType && + nameMatch && + typeMatch && + modeMatch +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *ResourceAddress) Less(other *ResourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
+ return addr.InstanceType < other.InstanceType + + default: + return false + + } +} + +func ParseResourceIndex(s string) (int, error) { + if s == "" { + return -1, nil + } + return strconv.Atoi(s) +} + +func ParseResourcePath(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ".") + path := make([]string, 0, len(parts)) + for _, s := range parts { + // Due to the limitations of the regexp match below, the path match has + // some noise in it we have to filter out :| + if s == "" || s == "module" { + continue + } + path = append(path, s) + } + return path +} + +func ParseInstanceType(s string) (InstanceType, error) { + switch s { + case "", "primary": + return TypePrimary, nil + case "deposed": + return TypeDeposed, nil + case "tainted": + return TypeTainted, nil + default: + return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string "aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "module.foo.module.bar" (optional) + `(?P(?:module\.(?P[^.]+)\.?)*)` + + // possibly "data.", if targeting is a data resource + `(?P(?:data\.)?)` + + // "aws_instance.web" (optional when module path specified) + `(?:(?P[^.]+)\.(?P[^.[]+))?` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P\d+)\])?` + + `\z`) + + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("invalid resource address %q", s) + } + + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + + return matches, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go new file mode 100644 index 000000000..c83643a65 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode.go @@ -0,0 +1,12 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go + +// ResourceMode is deprecated, use addrs.ResourceMode instead. +// It has been preserved for backwards compatibility. +type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go new file mode 100644 index 000000000..ba84346a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_mode_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ManagedResourceMode-0] + _ = x[DataResourceMode-1] +} + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go new file mode 100644 index 000000000..fec45967f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider.go @@ -0,0 +1,319 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/plugin/discovery" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" +) + +// ResourceProvider is an interface that must be implemented by any +// resource provider: the thing that creates and manages the resources in +// a Terraform configuration. +// +// Important implementation note: All returned pointers, such as +// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to +// shared data. Terraform is highly parallel and assumes that this data is safe +// to read/write in parallel so it must be unique references. Note that it is +// safe to return arguments as results, however. +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // GetSchema returns the config schema for the main provider + // configuration, as would appear in a "provider" block in the + // configuration files. + // + // Currently not all providers support schema. Callers must therefore + // first call Resources and DataSources and ensure that at least one + // resource or data source has the SchemaAvailable flag set. + GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) + + // Input was used prior to v0.12 to ask the provider to prompt the user + // for input to complete the configuration. + // + // From v0.12 onwards this method is never called because Terraform Core + // is able to handle the necessary input logic itself based on the + // schema returned from GetSchema. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if it occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. 
+ Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. + // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. 
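+	//
+	// For illustration (hypothetical provider and values, not part of this
+	// file), an import call takes the instance info plus the external ID:
+	//
+	//	states, err := p.ImportState(&InstanceInfo{Type: "aws_instance"}, "i-abc123")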
+ ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. + ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderResolver is an interface implemented by objects that are +// able to resolve a given set of resource provider version constraints +// into ResourceProviderFactory callbacks. +type ResourceProviderResolver interface { + // Given a constraint map, return a ResourceProviderFactory for each + // requested provider. If some or all of the constraints cannot be + // satisfied, return a non-nil slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) +} + +// ResourceProviderResolverFunc wraps a callback function and turns it into +// a ResourceProviderResolver implementation, for convenience in situations +// where a function and its associated closure are sufficient as a resolver +// implementation. 
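// As an illustrative aside (not part of the vendored source), the fixed
// resolver defined below wraps exactly this kind of function; a test might
// build one from an in-memory mock, where the provider name "test" is
// hypothetical:
//
//     factories := map[string]ResourceProviderFactory{
//         "test": ResourceProviderFactoryFixed(new(MockResourceProvider)),
//     }
//     resolver := ResourceProviderResolverFixed(factories)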
+type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) + +// ResolveProviders implements ResourceProviderResolver by calling the +// wrapped function. +func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + return f(reqd) +} + +// ResourceProviderResolverFixed returns a ResourceProviderResolver that +// has a fixed set of provider factories provided by the caller. The returned +// resolver ignores version constraints entirely and just returns the given +// factory for each requested provider name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { + return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + ret := make(map[string]ResourceProviderFactory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. +func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} + +// resourceProviderFactories matches available plugins to the given version +// requirements to produce a map of compatible provider plugins if possible, +// or an error if the currently-available plugins are insufficient. +// +// This should be called only with configurations that have passed calls +// to config.Validate(), which ensures that all of the given version +// constraints are valid. It will panic if any invalid constraints are present. +func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret, errs := resolver.ResolveProviders(reqd) + if errs != nil { + diags = diags.Append( + tfdiags.Sourceless(tfdiags.Error, + "Could not satisfy plugin requirements", + errPluginInit, + ), + ) + + for _, err := range errs { + diags = diags.Append(err) + } + + return nil, diags + } + + return ret, nil +} + +const errPluginInit = ` +Plugin reinitialization required. Please run "terraform init". + +Plugins are external binaries that Terraform uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. 
To see the +requirements and constraints from each module, run "terraform providers". +` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go new file mode 100644 index 000000000..4000e3d21 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provider_mock.go @@ -0,0 +1,315 @@ +package terraform + +import ( + "sync" +) + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + CloseCalled bool + CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = 
true + return p.CloseError +} + +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil { + return p.ValidateResourceFn(t, c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureFn != nil { + return p.ConfigureFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. 
Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go new file mode 100644 index 000000000..74ee2a940 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner.go @@ -0,0 +1,70 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/provisioners" +) + +// ResourceProvisioner is an 
interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. +type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns the new + // resource state along with an error. Instead of a diff, the ResourceConfig + // is provided since provisioners only run after a resource has been + // newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. +type ProvisionerFactory = provisioners.Factory diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go new file mode 100644 index 000000000..ed6f241bc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/resource_provisioner_mock.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" +) + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. 
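// As a hedged sketch (not part of the vendored source) of how the mock is
// driven in tests: canned return values go in the *Return fields and the
// *Called fields record what happened; the warning text here is arbitrary.
//
//     p := new(MockResourceProvisioner)
//     p.ValidateReturnWarns = []string{"deprecated argument"}
//     warns, errs := p.Validate(&ResourceConfig{})
//     // warns echoes the canned warning, errs is nil, p.ValidateCalled is true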
+type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetConfigSchemaCalled bool + GetConfigSchemaReturnSchema *configschema.Block + GetConfigSchemaReturnError error + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) + +func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + p.GetConfigSchemaCalled = true + return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go new file mode 100644 index 000000000..8bc3b017b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/schemas.go @@ -0,0 +1,278 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. +type Schemas struct { + Providers map[string]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[typeName] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. 
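// For illustration only (not part of the vendored source), assuming ss was
// populated by LoadSchemas and that "aws" is a provider type present in the
// configuration:
//
//     block := ss.ProviderConfig("aws")
//     if block == nil {
//         // the provider is unknown or reported no provider-level schema
//     }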
+func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { + ps := ss.ProviderSchema(typeName) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(providerType) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. +func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[string]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(typeName string) { + if _, exists := schemas[typeName]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) + provider, err := components.ResourceProvider(typeName, "early/"+typeName) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+ schemas[typeName] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName), + ) + } + } + + schemas[typeName] = s + } + + if config != nil { + for _, typeName := range config.ProviderTypes() { + ensure(typeName) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeName := range needed { + ensure(typeName) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name, "early/"+name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. + for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. 
+// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. +type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go new file mode 100644 index 000000000..1d742c2f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state.go @@ -0,0 +1,2217 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" + "github.com/mitchellh/copystructure" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". 
An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. 
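// An illustrative sketch, not from the vendored source: because the existence
// check happens inside AddModule, callers can simply do
//
//     root := s.AddModule(addrs.RootModuleInstance)
//
// and get back either the pre-existing root ModuleState or a freshly
// initialized one.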
+// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. 
The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. 
+ for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. +// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. 
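// A hedged usage sketch (not from the vendored source), assuming local and
// remote are *State values of the same lineage:
//
//     cmp, err := local.CompareAges(remote)
//     if err == nil && cmp == StateAgeReceiverOlder {
//         // the remote snapshot is newer; prefer it over the local copy
//     }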
+// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future. 
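// For illustration only (not from the vendored source), a typical guard before
// operating on a freshly read state:
//
//     if state.FromFutureTerraform() {
//         return fmt.Errorf("state was written by a newer Terraform (%s)", state.TFVersion)
//     }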
+func (s *State) FromFutureTerraform() bool { + s.Lock() + defer s.Unlock() + + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return tfversion.SemVer.LessThan(v) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = StateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } else { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// Empty returns true if BackendState has no state. 
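// A hedged sketch, not from the vendored source; backend is assumed to be a
// *BackendState and backendSchema the *configschema.Block for backend.Type:
//
//     if !backend.Empty() {
//         cfg, err := backend.Config(backendSchema)
//         if err != nil {
//             // the stored raw config does not conform to the schema
//         }
//         _ = cfg // cty.Value with the decoded backend configuration
//     }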
+func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Config decodes the type-specific configuration object using the provided +// schema and returns the result as a cty.Value. +// +// An error is returned if the stored configuration does not conform to the +// given schema. +func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { + ty := schema.ImpliedType() + if s == nil { + return cty.NullVal(ty), nil + } + return ctyjson.Unmarshal(s.ConfigRaw, ty) +} + +// SetConfig replaces (in-place) the type-specific configuration object using +// the provided value and associated schema. +// +// An error is returned if the given value does not conform to the implied +// type of the schema. +func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { + ty := schema.ImpliedType() + buf, err := ctyjson.Marshal(val, ty) + if err != nil { + return err + } + s.ConfigRaw = buf + return nil +} + +// ForPlan produces an alternative representation of the reciever that is +// suitable for storing in a plan. The current workspace must additionally +// be provided, to be stored alongside the backend configuration. +// +// The backend configuration schema is required in order to properly +// encode the backend-specific configuration settings. +func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { + if s == nil { + return nil, nil + } + + configVal, err := s.Config(schema) + if err != nil { + return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) + } + return plans.NewBackend(s.Type, configVal, schema, workspaceName) +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +func (r *RemoteState) Equals(other *RemoteState) bool { + r.Lock() + defer r.Unlock() + + if r.Type != other.Type { + return false + } + if len(r.Config) != len(other.Config) { + return false + } + for k, v := range r.Config { + if other.Config[k] != v { + return false + } + } + return true +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. 
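// For illustration only (not from the vendored source); the values are arbitrary:
//
//     a := &OutputState{Type: "string", Value: "web-sg-01"}
//     b := &OutputState{Type: "string", Value: "web-sg-01"}
//     _ = a.Equal(b) // true: Type, Sensitive and Value all match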
+func (s *OutputState) Equal(other *OutputState) bool { + if s == nil && other == nil { + return true + } + + if s == nil || other == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Sensitive != other.Sensitive { + return false + } + + if !reflect.DeepEqual(s.Value, other.Value) { + return false + } + + return true +} + +// ModuleState is used to track all the state relevant to a single +// module. Previous to Terraform 0.3, all state belonged to the "root" +// module. +type ModuleState struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*OutputState `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + mu sync.Mutex +} + +func (s *ModuleState) Lock() { s.mu.Lock() } +func (s *ModuleState) Unlock() { s.mu.Unlock() } + +// Equal tests whether one module state is equal to another. +func (m *ModuleState) Equal(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + // Paths must be equal + if !reflect.DeepEqual(m.Path, other.Path) { + return false + } + + // Outputs must be equal + if len(m.Outputs) != len(other.Outputs) { + return false + } + for k, v := range m.Outputs { + if !other.Outputs[k].Equal(v) { + return false + } + } + + // Dependencies must be equal. This sorts these in place but + // this shouldn't cause any problems. + sort.Strings(m.Dependencies) + sort.Strings(other.Dependencies) + if len(m.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range m.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // Resources must be equal + if len(m.Resources) != len(other.Resources) { + return false + } + for k, r := range m.Resources { + otherR, ok := other.Resources[k] + if !ok { + return false + } + + if !r.Equal(otherR) { + return false + } + } + + return true +} + +// IsRoot says whether or not this module diff is for the root module. +func (m *ModuleState) IsRoot() bool { + m.Lock() + defer m.Unlock() + return reflect.DeepEqual(m.Path, rootModulePath) +} + +// IsDescendent returns true if other is a descendent of this module. 
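// A short illustrative sketch (not from the vendored source); the module name
// "network" is arbitrary:
//
//     root := &ModuleState{Path: []string{"root"}}
//     child := &ModuleState{Path: []string{"root", "network"}}
//     _ = root.IsDescendent(child) // true
//     _ = child.IsDescendent(root) // false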
+func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans. +func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { + m.Lock() + defer m.Unlock() + + inConfig := make(map[string]struct{}) + if c != nil { + for _, r := range c.ManagedResources { + inConfig[r.Addr().String()] = struct{}{} + } + for _, r := range c.DataResources { + inConfig[r.Addr().String()] = struct{}{} + } + } + + var result []addrs.ResourceInstance + for k := range m.Resources { + // Since we've not yet updated state to use our new address format, + // we need to do some shimming here. + legacyAddr, err := parseResourceAddressInternal(k) + if err != nil { + // Suggests that the user tampered with the state, since we always + // generate valid internal addresses. + log.Printf("ModuleState has invalid resource key %q. Ignoring.", k) + continue + } + + addr := legacyAddr.AbsResourceInstanceAddr().Resource + compareKey := addr.Resource.String() // compare by resource address, ignoring instance key + if _, exists := inConfig[compareKey]; !exists { + result = append(result, addr) + } + } + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { + if outputs == nil { + // If we got no output map at all then we'll just treat our set of + // configured outputs as empty, since that suggests that they've all + // been removed by removing their containing module. + outputs = make(map[string]*configs.Output) + } + + s.Lock() + defer s.Unlock() + + var ret []addrs.OutputValue + for n := range s.Outputs { + if _, declared := outputs[n]; !declared { + ret = append(ret, addrs.OutputValue{ + Name: n, + }) + } + } + + return ret +} + +// View returns a view with the given resource prefix. 
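+//
+// For example (illustrative sketch; resource keys are hypothetical), given a
+// module containing "aws_instance.web", "aws_instance.web.0" and
+// "aws_instance.db", View("aws_instance.web") returns a deep copy that keeps
+// only the first two: the exact key and any key under the
+// "aws_instance.web." prefix.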
+func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range 
vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. + parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). 
+ // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. +func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. +func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +// ProviderAddr returns the provider address for the receiver, by parsing the +// string representation saved in state. An error can be returned if the +// value in state is corrupt. 
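+//
+// A minimal sketch (assuming the legacy "provider.<name>" string form that
+// this state format stores):
+//
+//    rs := &ResourceState{Type: "aws_instance", Provider: "provider.aws"}
+//    addr, err := rs.ProviderAddr()
+//    // addr identifies the default "aws" provider configuration; err is
+//    // non-nil if the stored string cannot be parsed as a provider address.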
+func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { + var diags tfdiags.Diagnostics + + str := s.Provider + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + return addrs.AbsProviderConfig{}, diags.Err() + } + + addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags.Err() +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. 
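+//
+// A minimal sketch (hypothetical attribute values):
+//
+//    val := cty.ObjectVal(map[string]cty.Value{
+//        "id":  cty.StringVal("i-abc123"),
+//        "ami": cty.StringVal("ami-123456"),
+//    })
+//    is := NewInstanceStateShimmedFromValue(val, 1)
+//    // is.ID is "i-abc123", is.Attributes holds the flatmapped attributes,
+//    // and is.Meta["schema_version"] is 1.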
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
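+//
+// A minimal sketch (hypothetical attributes) of how the merge behaves:
+//
+//    state := &InstanceState{ID: "i-abc123", Attributes: map[string]string{
+//        "ami":  "ami-old",
+//        "name": "web",
+//    }}
+//    diff := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
+//        "ami":  {Old: "ami-old", New: "ami-new"},
+//        "arn":  {NewComputed: true},
+//        "name": {NewRemoved: true},
+//    }}
+//    merged := state.MergeDiff(diff)
+//    // merged.Attributes: "ami" becomes "ami-new", "arn" is set to the
+//    // unknown-value placeholder, and "name" is removed.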
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %v", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to Terraform 0.3. Please upgrade\n" + + "this state file using Terraform 0.6.16 prior to using it with\n" + + "Terraform 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. +func ReadState(src io.Reader) (*State, error) { + // check for a nil file specifically, since that produces a platform + // specific error if we try to use it in a bufio.Reader. 
+ if f, ok := src.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + buf := bufio.NewReader(src) + + if _, err := buf.Peek(1); err != nil { + if err == io.EOF { + return nil, ErrNoState + } + return nil, err + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice. + // This is suboptimal, but will work for now. + jsonBytes, err := ioutil.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("Reading state file failed: %v", err) + } + + versionIdentifier := &jsonStateVersionIdentifier{} + if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { + return nil, fmt.Errorf("Decoding state file version failed: %v", err) + } + + var result *State + switch versionIdentifier.Version { + case 0: + return nil, fmt.Errorf("State version 0 is not supported as JSON.") + case 1: + v1State, err := ReadStateV1(jsonBytes) + if err != nil { + return nil, err + } + + v2State, err := upgradeStateV1ToV2(v1State) + if err != nil { + return nil, err + } + + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + // increment the Serial whenever we upgrade state + v3State.Serial++ + result = v3State + case 2: + v2State, err := ReadStateV2(jsonBytes) + if err != nil { + return nil, err + } + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + v3State.Serial++ + result = v3State + case 3: + v3State, err := ReadStateV3(jsonBytes) + if err != nil { + return nil, err + } + + result = v3State + default: + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), versionIdentifier.Version) + } + + // If we reached this place we must have a result set + if result == nil { + panic("resulting state in load not set, assertion failed") + } + + // Prune the state when read it. Its possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state for example). + result.prune() + + // Validate the state file is valid + if err := result.Validate(); err != nil { + return nil, err + } + + return result, nil +} + +func ReadStateV1(jsonBytes []byte) (*stateV1, error) { + v1State := &stateV1{} + if err := json.Unmarshal(jsonBytes, v1State); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + if v1State.Version != 1 { + return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ + "read %d, expected 1", v1State.Version) + } + + return v1State, nil +} + +func ReadStateV2(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. 
Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + return state, nil +} + +func ReadStateV3(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + // Now we write the state back out to detect any changes in normaliztion. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. incrementing serial number") + state.Serial++ + } + + return state, nil +} + +// WriteState writes a state somewhere in a binary format. +func WriteState(d *State, dst io.Writer) error { + // writing a nil state is a noop. + if d == nil { + return nil + } + + // make sure we have no uninitialized fields + d.init() + + // Make sure it is sorted + d.sort() + + // Ensure the version is set + d.Version = StateVersion + + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The Terraform version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + + // Encode the data in a human-friendly way + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode state: %s", err) + } + + // We append a newline to the data because MarshalIndent doesn't + data = append(data, '\n') + + // Write the data out to the dst + if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { + return fmt.Errorf("Failed to write state: %v", err) + } + + return nil +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
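+//
+// For example (illustrative sketch; names are hypothetical):
+//
+//    names := []string{"aws_instance.foo.10", "aws_instance.foo.9", "aws_instance.bar"}
+//    sort.Sort(resourceNameSort(names))
+//    // names is now ["aws_instance.bar", "aws_instance.foo.9", "aws_instance.foo.10"]
+//    // because index parts compare numerically rather than as strings.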
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go new file mode 100644 index 000000000..2dcb11b76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type StateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// ParseResourceAddress. +func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { + // Parse all the addresses + as := make([]*ResourceAddress, len(fs)) + for i, v := range fs { + a, err := ParseResourceAddress(v) + if err != nil { + return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) + } + + as[i] = a + } + + // If we weren't given any filters, then we list all + if len(fs) == 0 { + as = append(as, &ResourceAddress{Index: -1}) + } + + // Filter each of the address. 
We keep track of this in a map to + // strip duplicates. + resultSet := make(map[string]*StateFilterResult) + for _, a := range as { + for _, r := range f.filterSingle(a) { + resultSet[r.String()] = r + } + } + + // Make the result list + results := make([]*StateFilterResult, 0, len(resultSet)) + for _, v := range resultSet { + results = append(results, v) + } + + // Sort them and return + sort.Sort(StateFilterResultSlice(results)) + return results, nil +} + +func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { + // The slice to keep track of results + var results []*StateFilterResult + + // Go through modules first. + modules := make([]*ModuleState, 0, len(f.State.Modules)) + for _, m := range f.State.Modules { + if f.relevant(a, m) { + modules = append(modules, m) + + // Only add the module to the results if we haven't specified a type. + // We also ignore the root module. + if a.Type == "" && len(m.Path) > 1 { + results = append(results, &StateFilterResult{ + Path: m.Path[1:], + Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Value: m, + }) + } + } + } + + // With the modules set, go through all the resources within + // the modules to find relevant resources. + for _, m := range modules { + for n, r := range m.Resources { + // The name in the state contains valuable information. Parse. + key, err := ParseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + + // Older states and test fixtures often don't contain the + // type directly on the ResourceState. We add this so StateFilter + // is a bit more robust. + if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { + if a.Name != "" && a.Name != key.Name { + // Name doesn't match + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &ResourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = TypePrimary + addr.InstanceTypeSet = false + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = TypeDeposed + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! 
+ return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// StateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type StateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *StateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *StateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *StateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// StateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. +type StateFilterResultSlice []*StateFilterResult + +func (s StateFilterResultSlice) Len() int { return len(s) } +func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s StateFilterResultSlice) Less(i, j int) bool { + a, b := s[i], s[j] + + // if these address contain an index, we want to sort by index rather than name + addrA, errA := ParseResourceAddress(a.Address) + addrB, errB := ParseResourceAddress(b.Address) + if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { + return addrA.Index < addrB.Index + } + + // If the addresses are different it is just lexographic sorting + if a.Address != b.Address { + return a.Address < b.Address + } + + // Addresses are the same, which means it matters on the type + return a.sortedType() < b.sortedType() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go new file mode 100644 index 000000000..aa13cce80 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v1_to_v2.go @@ -0,0 +1,189 @@ +package terraform + +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*State, error) { + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*ModuleState, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &State{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + newState.sort() + newState.init() + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() 
(*RemoteState, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &RemoteState{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root and catch + // duplicate path errors later (as part of Validate). + path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: *ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) + } + return &EphemeralState{ + ConnInfo: 
connInfo.(map[string]string), + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go new file mode 100644 index 000000000..e52d35fcd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_upgrade_v2_to_v3.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. +func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. 
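+ //
+ // For example (illustrative; attribute names are hypothetical): a map
+ // attribute stored as "tags.#", "tags.Name", "tags.env" has non-numeric
+ // subkeys, so its count key is rewritten to "tags.%". A list stored as
+ // "ports.#", "ports.0", "ports.1" keeps the "#" count key. An empty
+ // collection that has only "ports.#" and no element keys has that count
+ // key deleted so it can be recreated later.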
+ + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go new file mode 100644 index 000000000..68cffb41b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/state_v1.go @@ -0,0 +1,145 @@ +package terraform + +// stateV1 keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +// +// stateV1 is _only used for the purposes of backwards compatibility +// and is no longer used in Terraform. +// +// For the upgrade process, see state_upgrade_v1_to_v2.go +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. 
+ Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that Terraform is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. 
+ Attributes map[string]string `json:"attributes,omitempty"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral ephemeralStateV1 `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} + +type ephemeralStateV1 struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go new file mode 100644 index 000000000..3f0418d92 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/testing.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "os" + "testing" +) + +// TestStateFile writes the given state to the path. +func TestStateFile(t *testing.T, path string, state *State) { + f, err := os.Create(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + if err := WriteState(state, f); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go new file mode 100644 index 000000000..f9559f41b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform.go @@ -0,0 +1,62 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphTransformer is the interface that transformers implement. This +// interface is only for transforms that need entire graph visibility. +type GraphTransformer interface { + Transform(*Graph) error +} + +// GraphVertexTransformer is an interface that transforms a single +// Vertex within with graph. This is a specialization of GraphTransformer +// that makes it easy to do vertex replacement. +// +// The GraphTransformer that runs through the GraphVertexTransformers is +// VertexTransformer. +type GraphVertexTransformer interface { + Transform(dag.Vertex) (dag.Vertex, error) +} + +// GraphTransformIf is a helper function that conditionally returns a +// GraphTransformer given. This is useful for calling inline a sequence +// of transforms without having to split it up into multiple append() calls. 
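+//
+// A minimal usage sketch (the "validate" flag and the chosen transformer are
+// hypothetical). The result is nil when the condition is false, so callers
+// that invoke the transforms themselves should skip nil entries:
+//
+//    step := GraphTransformIf(func() bool { return validate }, &AttachSchemaTransformer{Schemas: schemas})
+//    if step != nil {
+//        if err := step.Transform(g); err != nil {
+//            // handle the error
+//        }
+//    }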
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { + if f() { + return then + } + + return nil +} + +type graphTransformerMulti struct { + Transforms []GraphTransformer +} + +func (t *graphTransformerMulti) Transform(g *Graph) error { + var lastStepStr string + for _, t := range t.Transforms { + log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t) + if err := t.Transform(g); err != nil { + return err + } + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t) + } + } + + return nil +} + +// GraphTransformMulti combines multiple graph transformers into a single +// GraphTransformer that runs all the individual graph transformers. +func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { + return &graphTransformerMulti{Transforms: ts} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go new file mode 100644 index 000000000..cbac13387 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_provider.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// GraphNodeAttachProvider is an interface that must be implemented by nodes +// that want provider configurations attached. +type GraphNodeAttachProvider interface { + // Must be implemented to determine the path for the configuration + GraphNodeSubPath + + // ProviderName with no module prefix. Example: "aws". + ProviderAddr() addrs.AbsProviderConfig + + // Sets the configuration + AttachProvider(*configs.Provider) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go new file mode 100644 index 000000000..23578c784 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_config_resource.go @@ -0,0 +1,74 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes +// that want resource configurations attached. +type GraphNodeAttachResourceConfig interface { + GraphNodeResource + + // Sets the configuration + AttachResourceConfig(*configs.Resource) +} + +// AttachResourceConfigTransformer goes through the graph and attaches +// resource configuration structures to nodes that implement +// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig. +// +// The attached configuration structures are directly from the configuration. +// If they're going to be modified, a copy should be made. 
+type AttachResourceConfigTransformer struct { + Config *configs.Config // Config is the root node in the config tree +} + +func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { + + // Go through and find GraphNodeAttachResource + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachResource implementations + arn, ok := v.(GraphNodeAttachResourceConfig) + if !ok { + continue + } + + // Determine what we're looking for + addr := arn.ResourceAddr() + + // Get the configuration. + config := t.Config.DescendentForInstance(addr.Module) + if config == nil { + log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v) + continue + } + + for _, r := range config.Module.ManagedResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource + continue + } + + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + } + for _, r := range config.Module.DataResources { + rAddr := r.Addr() + + if rAddr != addr.Resource { + // Not the same resource + continue + } + + log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange) + arn.AttachResourceConfig(r) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go new file mode 100644 index 000000000..fee220b52 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_schema.go @@ -0,0 +1,99 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeAttachResourceSchema is an interface implemented by node types +// that need a resource schema attached. +type GraphNodeAttachResourceSchema interface { + GraphNodeResource + GraphNodeProviderConsumer + + AttachResourceSchema(schema *configschema.Block, version uint64) +} + +// GraphNodeAttachProviderConfigSchema is an interface implemented by node types +// that need a provider configuration schema attached. +type GraphNodeAttachProviderConfigSchema interface { + GraphNodeProvider + + AttachProviderConfigSchema(*configschema.Block) +} + +// GraphNodeAttachProvisionerSchema is an interface implemented by node types +// that need one or more provisioner schemas attached. +type GraphNodeAttachProvisionerSchema interface { + ProvisionedBy() []string + + // SetProvisionerSchema is called during transform for each provisioner + // type returned from ProvisionedBy, providing the configuration schema + // for each provisioner in turn. The implementer should save these for + // later use in evaluating provisioner configuration blocks. + AttachProvisionerSchema(name string, schema *configschema.Block) +} + +// AttachSchemaTransformer finds nodes that implement +// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or +// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each +// and then passes them to a method implemented by the node. 
+type AttachSchemaTransformer struct { + Schemas *Schemas +} + +func (t *AttachSchemaTransformer) Transform(g *Graph) error { + if t.Schemas == nil { + // Should never happen with a reasonable caller, but we'll return a + // proper error here anyway so that we'll fail gracefully. + return fmt.Errorf("AttachSchemaTransformer used with nil Schemas") + } + + for _, v := range g.Vertices() { + + if tv, ok := v.(GraphNodeAttachResourceSchema); ok { + addr := tv.ResourceAddr() + mode := addr.Resource.Mode + typeName := addr.Resource.Type + providerAddr, _ := tv.ProvidedBy() + providerType := providerAddr.ProviderConfig.Type + + schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v)) + tv.AttachResourceSchema(schema, version) + } + + if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok { + providerAddr := tv.ProviderAddr() + schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v)) + tv.AttachProviderConfigSchema(schema) + } + + if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok { + names := tv.ProvisionedBy() + for _, name := range names { + schema := t.Schemas.ProvisionerConfig(name) + if schema == nil { + log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v)) + continue + } + log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v)) + tv.AttachProvisionerSchema(name, schema) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go new file mode 100644 index 000000000..f87494879 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_attach_state.go @@ -0,0 +1,68 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// GraphNodeAttachResourceState is an interface that can be implemented +// to request that a ResourceState is attached to the node. +// +// Due to a historical naming inconsistency, the type ResourceState actually +// represents the state for a particular _instance_, while InstanceState +// represents the values for that instance during a particular phase +// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState +// is supported only for nodes that represent resource instances, even though +// the name might suggest it is for containing resources. +type GraphNodeAttachResourceState interface { + GraphNodeResourceInstance + + // Sets the state + AttachResourceState(*states.Resource) +} + +// AttachStateTransformer goes through the graph and attaches +// state to nodes that implement the interfaces above. 
+type AttachStateTransformer struct { + State *states.State // State is the root state +} + +func (t *AttachStateTransformer) Transform(g *Graph) error { + // If no state, then nothing to do + if t.State == nil { + log.Printf("[DEBUG] Not attaching any node states: overall state is nil") + return nil + } + + for _, v := range g.Vertices() { + // Nodes implement this interface to request state attachment. + an, ok := v.(GraphNodeAttachResourceState) + if !ok { + continue + } + addr := an.ResourceInstanceAddr() + + rs := t.State.Resource(addr.ContainingResource()) + if rs == nil { + log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + is := rs.Instance(addr.Resource.Key) + if is == nil { + // We don't actually need this here, since we'll attach the whole + // resource state, but we still check because it'd be weird + // for the specific instance we're attaching to not to exist. + log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr) + continue + } + + // make sure to attach a copy of the state, so instances can modify the + // same ResourceState. + an.AttachResourceState(rs.DeepCopy()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go new file mode 100644 index 000000000..8920761ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_config.go @@ -0,0 +1,133 @@ +package terraform + +import ( + "log" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// ConfigTransformer is a GraphTransformer that adds all the resources +// from the configuration to the graph. +// +// The module used to configure this transformer must be the root module. +// +// Only resources are added to the graph. Variables, outputs, and +// providers must be added via other transforms. +// +// Unlike ConfigTransformerOld, this transformer creates a graph with +// all resources including module resources, rather than creating module +// nodes that are then "flattened". +type ConfigTransformer struct { + Concrete ConcreteResourceNodeFunc + + // Module is the module to add resources from. + Config *configs.Config + + // Unique will only add resources that aren't already present in the graph. + Unique bool + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode addrs.ResourceMode + + l sync.Mutex + uniqueMap map[string]struct{} +} + +func (t *ConfigTransformer) Transform(g *Graph) error { + // Lock since we use some internal state + t.l.Lock() + defer t.l.Unlock() + + // If no configuration is available, we don't do anything + if t.Config == nil { + return nil + } + + // Reset the uniqueness map. If we're tracking uniques, then populate + // it with addresses. 
+ t.uniqueMap = make(map[string]struct{}) + defer func() { t.uniqueMap = nil }() + if t.Unique { + for _, v := range g.Vertices() { + if rn, ok := v.(GraphNodeResource); ok { + t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} + } + } + } + + // Start the transformation process + return t.transform(g, t.Config) +} + +func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error { + // If no config, do nothing + if config == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, config); err != nil { + return err + } + + // Transform all the children. + for _, c := range config.Children { + if err := t.transform(g, c); err != nil { + return err + } + } + + return nil +} + +func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error { + path := config.Path + module := config.Module + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path) + + // For now we assume that each module call produces only one module + // instance with no key, since we don't yet support "count" and "for_each" + // on modules. + // FIXME: As part of supporting "count" and "for_each" on modules, rework + // this so that we'll "expand" the module call first and then create graph + // nodes for each module instance separately. + instPath := path.UnkeyedInstanceShim() + + allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources)) + for _, r := range module.ManagedResources { + allResources = append(allResources, r) + } + for _, r := range module.DataResources { + allResources = append(allResources, r) + } + + for _, r := range allResources { + relAddr := r.Addr() + + if t.ModeFilter && relAddr.Mode != t.Mode { + // Skip non-matching modes + continue + } + + addr := relAddr.Absolute(instPath) + if _, ok := t.uniqueMap[addr.String()]; ok { + // We've already seen a resource with this address. This should + // never happen, because we enforce uniqueness in the config loader. + continue + } + + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go new file mode 100644 index 000000000..892f75ec1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_count_boundary.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// CountBoundaryTransformer adds a node that depends on everything else +// so that it runs last in order to clean up the state for nodes that +// are on the "count boundary": "foo.0" when only one exists becomes "foo" +type CountBoundaryTransformer struct { + Config *configs.Config +} + +func (t *CountBoundaryTransformer) Transform(g *Graph) error { + node := &NodeCountBoundary{ + Config: t.Config, + } + g.Add(node) + + // Depends on everything + for _, v := range g.Vertices() { + // Don't connect to ourselves + if v == node { + continue + } + + // Connect! 
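+		// The edge points from the boundary node to v, meaning the boundary
+		// node depends on v and is therefore only visited once every other
+		// vertex in the graph has been handled.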
+ g.Connect(dag.BasicEdge(node, v)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go new file mode 100644 index 000000000..98e088eee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_cbd.go @@ -0,0 +1,297 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// GraphNodeDestroyerCBD must be implemented by nodes that might be +// create-before-destroy destroyers, or might plan a create-before-destroy +// action. +type GraphNodeDestroyerCBD interface { + // CreateBeforeDestroy returns true if this node represents a node + // that is doing a CBD. + CreateBeforeDestroy() bool + + // ModifyCreateBeforeDestroy is called when the CBD state of a node + // is changed dynamically. This can return an error if this isn't + // allowed. + ModifyCreateBeforeDestroy(bool) error +} + +// GraphNodeAttachDestroyer is implemented by applyable nodes that have a +// companion destroy node. This allows the creation node to look up the status +// of the destroy node and determine if it needs to depose the existing state, +// or replace it. +// If a node is not marked as create-before-destroy in the configuration, but a +// dependency forces that status, only the destroy node will be aware of that +// status. +type GraphNodeAttachDestroyer interface { + // AttachDestroyNode takes a destroy node and saves a reference to that + // node in the receiver, so it can later check the status of + // CreateBeforeDestroy(). + AttachDestroyNode(n GraphNodeDestroyerCBD) +} + +// ForcedCBDTransformer detects when a particular CBD-able graph node has +// dependencies with another that has create_before_destroy set that require +// it to be forced on, and forces it on. +// +// This must be used in the plan graph builder to ensure that +// create_before_destroy settings are properly propagated before constructing +// the planned changes. This requires that the plannable resource nodes +// implement GraphNodeDestroyerCBD. +type ForcedCBDTransformer struct { +} + +func (t *ForcedCBDTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + // If there are no CBD decendent (dependent nodes), then we + // do nothing here. + if !t.hasCBDDescendent(g, v) { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v) + continue + } + + // If this isn't naturally a CBD node, this means that an descendent is + // and we need to auto-upgrade this node to CBD. We do this because + // a CBD node depending on non-CBD will result in cycles. To avoid this, + // we always attempt to upgrade it. + log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v) + if err := dn.ModifyCreateBeforeDestroy(true); err != nil { + return fmt.Errorf( + "%s: must have create before destroy enabled because "+ + "a dependent resource has CBD enabled. 
However, when "+ + "attempting to automatically do this, an error occurred: %s", + dag.VertexName(v), err) + } + } else { + log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v) + } + } + return nil +} + +// hasCBDDescendent returns true if any descendent (node that depends on this) +// has CBD set. +func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool { + s, _ := g.Descendents(v) + if s == nil { + return true + } + + for _, ov := range s.List() { + dn, ok := ov.(GraphNodeDestroyerCBD) + if !ok { + continue + } + + if dn.CreateBeforeDestroy() { + // some descendent is CreateBeforeDestroy, so we need to follow suit + log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov)) + return true + } + } + + return false +} + +// CBDEdgeTransformer modifies the edges of CBD nodes that went through +// the DestroyEdgeTransformer to have the right dependencies. There are +// two real tasks here: +// +// 1. With CBD, the destroy edge is inverted: the destroy depends on +// the creation. +// +// 2. A_d must depend on resources that depend on A. This is to enable +// the destroy to only happen once nodes that depend on A successfully +// update to A. Example: adding a web server updates the load balancer +// before deleting the old web server. +// +// This transformer requires that a previous transformer has already forced +// create_before_destroy on for nodes that are depended on by explicit CBD +// nodes. This is the logic in ForcedCBDTransformer, though in practice we +// will get here by recording the CBD-ness of each change in the plan during +// the plan walk and then forcing the nodes into the appropriate setting during +// DiffTransformer when building the apply graph. +type CBDEdgeTransformer struct { + // Module and State are only needed to look up dependencies in + // any way possible. Either can be nil if not availabile. + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners so we can + // properly resolve implicit dependencies. + Schemas *Schemas + + // If the operation is a simple destroy, no transformation is done. + Destroy bool +} + +func (t *CBDEdgeTransformer) Transform(g *Graph) error { + if t.Destroy { + return nil + } + + // Go through and reverse any destroy edges + destroyMap := make(map[string][]dag.Vertex) + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyerCBD) + if !ok { + continue + } + dern, ok := v.(GraphNodeDestroyer) + if !ok { + continue + } + + if !dn.CreateBeforeDestroy() { + continue + } + + // Find the destroy edge. There should only be one. + for _, e := range g.EdgesTo(v) { + // Not a destroy edge, ignore it + de, ok := e.(*DestroyEdge) + if !ok { + continue + } + + log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s", + dag.VertexName(de.Source()), dag.VertexName(de.Target())) + + // Found it! Invert. + g.RemoveEdge(de) + applyNode := de.Source() + destroyNode := de.Target() + g.Connect(&DestroyEdge{S: destroyNode, T: applyNode}) + break + } + + // If the address has an index, we strip that. Our depMap creation + // graph doesn't expand counts so we don't currently get _exact_ + // dependencies. One day when we limit dependencies more exactly + // this will have to change. We have a test case covering this + // (depNonCBDCountBoth) so it'll be caught. 
+ addr := dern.DestroyAddr() + key := addr.ContainingResource().String() + + // Add this to the list of nodes that we need to fix up + // the edges for (step 2 above in the docs). + destroyMap[key] = append(destroyMap[key], v) + } + + // If we have no CBD nodes, then our work here is done + if len(destroyMap) == 0 { + return nil + } + + // We have CBD nodes. We now have to move on to the much more difficult + // task of connecting dependencies of the creation side of the destroy + // to the destruction node. The easiest way to explain this is an example: + // + // Given a pre-destroy dependence of: A => B + // And A has CBD set. + // + // The resulting graph should be: A => B => A_d + // + // They key here is that B happens before A is destroyed. This is to + // facilitate the primary purpose for CBD: making sure that downstreams + // are properly updated to avoid downtime before the resource is destroyed. + depMap, err := t.depMap(g, destroyMap) + if err != nil { + return err + } + + // We now have the mapping of resource addresses to the destroy + // nodes they need to depend on. We now go through our own vertices to + // find any matching these addresses and make the connection. + for _, v := range g.Vertices() { + // We're looking for creators + rn, ok := v.(GraphNodeCreator) + if !ok { + continue + } + + // Get the address + addr := rn.CreateAddr() + + // If the address has an index, we strip that. Our depMap creation + // graph doesn't expand counts so we don't currently get _exact_ + // dependencies. One day when we limit dependencies more exactly + // this will have to change. We have a test case covering this + // (depNonCBDCount) so it'll be caught. + key := addr.ContainingResource().String() + + // If there is nothing this resource should depend on, ignore it + dns, ok := depMap[key] + if !ok { + continue + } + + // We have nodes! Make the connection + for _, dn := range dns { + log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s", + dag.VertexName(dn), dag.VertexName(v)) + g.Connect(dag.BasicEdge(dn, v)) + } + } + + return nil +} + +func (t *CBDEdgeTransformer) depMap(g *Graph, destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) { + // Build the list of destroy nodes that each resource address should depend + // on. For example, when we find B, we map the address of B to A_d in the + // "depMap" variable below. + depMap := make(map[string][]dag.Vertex) + for _, v := range g.Vertices() { + // We're looking for resources. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + // Get the address + addr := rn.ResourceAddr() + key := addr.String() + + // Get the destroy nodes that are destroying this resource. + // If there aren't any, then we don't need to worry about + // any connections. + dns, ok := destroyMap[key] + if !ok { + continue + } + + // Get the nodes that depend on this on. In the example above: + // finding B in A => B. Since dependencies can span modules, walk all + // descendents of the resource. + des, _ := g.Descendents(v) + for _, v := range des.List() { + // We're looking for resources. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + // Keep track of the destroy nodes that this address + // needs to depend on. + key := rn.ResourceAddr().String() + depMap[key] = append(depMap[key], dns...) 
+ } + } + + return depMap, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go new file mode 100644 index 000000000..1d211570f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_destroy_edge.go @@ -0,0 +1,323 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeDestroyer must be implemented by nodes that destroy resources. +type GraphNodeDestroyer interface { + dag.Vertex + + // DestroyAddr is the address of the resource that is being + // destroyed by this node. If this returns nil, then this node + // is not destroying anything. + DestroyAddr() *addrs.AbsResourceInstance +} + +// GraphNodeCreator must be implemented by nodes that create OR update resources. +type GraphNodeCreator interface { + // CreateAddr is the address of the resource being created or updated + CreateAddr() *addrs.AbsResourceInstance +} + +// DestroyEdgeTransformer is a GraphTransformer that creates the proper +// references for destroy resources. Destroy resources are more complex +// in that they must be depend on the destruction of resources that +// in turn depend on the CREATION of the node being destroy. +// +// That is complicated. Visually: +// +// B_d -> A_d -> A -> B +// +// Notice that A destroy depends on B destroy, while B create depends on +// A create. They're inverted. This must be done for example because often +// dependent resources will block parent resources from deleting. Concrete +// example: VPC with subnets, the VPC can't be deleted while there are +// still subnets. +type DestroyEdgeTransformer struct { + // These are needed to properly build the graph of dependencies + // to determine what a destroy node depends on. Any of these can be nil. + Config *configs.Config + State *states.State + + // If configuration is present then Schemas is required in order to + // obtain schema information from providers and provisioners in order + // to properly resolve implicit dependencies. + Schemas *Schemas +} + +func (t *DestroyEdgeTransformer) Transform(g *Graph) error { + // Build a map of what is being destroyed (by address string) to + // the list of destroyers. Usually there will be at most one destroyer + // per node, but we allow multiple if present for completeness. + destroyers := make(map[string][]GraphNodeDestroyer) + destroyerAddrs := make(map[string]addrs.AbsResourceInstance) + for _, v := range g.Vertices() { + dn, ok := v.(GraphNodeDestroyer) + if !ok { + continue + } + + addrP := dn.DestroyAddr() + if addrP == nil { + continue + } + addr := *addrP + + key := addr.String() + log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key) + destroyers[key] = append(destroyers[key], dn) + destroyerAddrs[key] = addr + } + + // If we aren't destroying anything, there will be no edges to make + // so just exit early and avoid future work. + if len(destroyers) == 0 { + return nil + } + + // Go through and connect creators to destroyers. 
Going along with + // our example, this makes: A_d => A + for _, v := range g.Vertices() { + cn, ok := v.(GraphNodeCreator) + if !ok { + continue + } + + addr := cn.CreateAddr() + if addr == nil { + continue + } + + key := addr.String() + ds := destroyers[key] + if len(ds) == 0 { + continue + } + + for _, d := range ds { + // For illustrating our example + a_d := d.(dag.Vertex) + a := v + + log.Printf( + "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", + dag.VertexName(a), dag.VertexName(a_d)) + + g.Connect(&DestroyEdge{S: a, T: a_d}) + + // Attach the destroy node to the creator + // There really shouldn't be more than one destroyer, but even if + // there are, any of them will represent the correct + // CreateBeforeDestroy status. + if n, ok := cn.(GraphNodeAttachDestroyer); ok { + if d, ok := d.(GraphNodeDestroyerCBD); ok { + n.AttachDestroyNode(d) + } + } + } + } + + // This is strange but is the easiest way to get the dependencies + // of a node that is being destroyed. We use another graph to make sure + // the resource is in the graph and ask for references. We have to do this + // because the node that is being destroyed may NOT be in the graph. + // + // Example: resource A is force new, then destroy A AND create A are + // in the graph. BUT if resource A is just pure destroy, then only + // destroy A is in the graph, and create A is not. + providerFn := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{NodeAbstractProvider: a} + } + steps := []GraphTransformer{ + // Add the local values + &LocalTransformer{Config: t.Config}, + + // Add outputs and metadata + &OutputTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, + &AttachStateTransformer{State: t.State}, + + // Add all the variables. We can depend on resources through + // variables due to module parameters, and we need to properly + // determine that. + &RootVariableTransformer{Config: t.Config}, + &ModuleVariableTransformer{Config: t.Config}, + + TransformProviders(nil, providerFn, t.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: t.Schemas}, + + &ReferenceTransformer{}, + } + + // Go through all the nodes being destroyed and create a graph. + // The resulting graph is only of things being CREATED. For example, + // following our example, the resulting graph would be: + // + // A, B (with no edges) + // + var tempG Graph + var tempDestroyed []dag.Vertex + for d := range destroyers { + // d is the string key for the resource being destroyed. We actually + // want the address value, which we stashed earlier. + addr := destroyerAddrs[d] + + // This part is a little bit weird but is the best way to + // find the dependencies we need to: build a graph and use the + // attach config and state transformers then ask for references. + abstract := NewNodeAbstractResourceInstance(addr) + tempG.Add(abstract) + tempDestroyed = append(tempDestroyed, abstract) + + // We also add the destroy version here since the destroy can + // depend on things that the creation doesn't (destroy provisioners). + destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} + tempG.Add(destroy) + tempDestroyed = append(tempDestroyed, destroy) + } + + // Run the graph transforms so we have the information we need to + // build references. 
+ log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) + for _, s := range steps { + log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) + if err := s.Transform(&tempG); err != nil { + log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) + return err + } + } + log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) + + // Go through all the nodes in the graph and determine what they + // depend on. + for _, v := range tempDestroyed { + // Find all ancestors of this to determine the edges we'll depend on + vs, err := tempG.Ancestors(v) + if err != nil { + return err + } + + refs := make([]dag.Vertex, 0, vs.Len()) + for _, raw := range vs.List() { + refs = append(refs, raw.(dag.Vertex)) + } + + refNames := make([]string, len(refs)) + for i, ref := range refs { + refNames[i] = dag.VertexName(ref) + } + log.Printf( + "[TRACE] DestroyEdgeTransformer: creation node %q references %s", + dag.VertexName(v), refNames) + + // If we have no references, then we won't need to do anything + if len(refs) == 0 { + continue + } + + // Get the destroy node for this. In the example of our struct, + // we are currently at B and we're looking for B_d. + rn, ok := v.(GraphNodeResourceInstance) + if !ok { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) + continue + } + + addr := rn.ResourceInstanceAddr() + dns := destroyers[addr.String()] + + // We have dependencies, check if any are being destroyed + // to build the list of things that we must depend on! + // + // In the example of the struct, if we have: + // + // B_d => A_d => A => B + // + // Then at this point in the algorithm we started with B_d, + // we built B (to get dependencies), and we found A. We're now looking + // to see if A_d exists. + var depDestroyers []dag.Vertex + for _, v := range refs { + rn, ok := v.(GraphNodeResourceInstance) + if !ok { + continue + } + + addr := rn.ResourceInstanceAddr() + key := addr.String() + if ds, ok := destroyers[key]; ok { + for _, d := range ds { + depDestroyers = append(depDestroyers, d.(dag.Vertex)) + log.Printf( + "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", + key, dag.VertexName(d)) + } + } + } + + // Go through and make the connections. Use the variable + // names "a_d" and "b_d" to reference our example. + for _, a_d := range dns { + for _, b_d := range depDestroyers { + if b_d != a_d { + log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) + g.Connect(dag.BasicEdge(b_d, a_d)) + } + } + } + } + + return t.pruneResources(g) +} + +// If there are only destroy instances for a particular resource, there's no +// reason for the resource node to prepare the state. Remove Resource nodes so +// that they don't fail by trying to evaluate a resource that is only being +// destroyed along with its dependencies. 
+func (t *DestroyEdgeTransformer) pruneResources(g *Graph) error { + for _, v := range g.Vertices() { + n, ok := v.(*NodeApplyableResource) + if !ok { + continue + } + + // if there are only destroy dependencies, we don't need this node + des, err := g.Descendents(n) + if err != nil { + return err + } + + descendents := des.List() + nonDestroyInstanceFound := false + for _, v := range descendents { + if _, ok := v.(*NodeApplyableResourceInstance); ok { + nonDestroyInstanceFound = true + break + } + } + + if nonDestroyInstanceFound { + continue + } + + // connect all the through-edges, then delete the node + for _, d := range g.DownEdges(n).List() { + for _, u := range g.UpEdges(n).List() { + g.Connect(dag.BasicEdge(u, d)) + } + } + log.Printf("DestroyEdgeTransformer: pruning unused resource node %s", dag.VertexName(n)) + g.Remove(n) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go new file mode 100644 index 000000000..b7a237fce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_diff.go @@ -0,0 +1,184 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/plans" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// DiffTransformer is a GraphTransformer that adds graph nodes representing +// each of the resource changes described in the given Changes object. +type DiffTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + State *states.State + Changes *plans.Changes +} + +func (t *DiffTransformer) Transform(g *Graph) error { + if t.Changes == nil || len(t.Changes.Resources) == 0 { + // Nothing to do! + return nil + } + + // Go through all the modules in the diff. + log.Printf("[TRACE] DiffTransformer starting") + + var diags tfdiags.Diagnostics + state := t.State + changes := t.Changes + + // DiffTransformer creates resource _instance_ nodes. If there are any + // whole-resource nodes already in the graph, we must ensure that they + // get evaluated before any of the corresponding instances by creating + // dependency edges, so we'll do some prep work here to ensure we'll only + // create connections to nodes that existed before we started here. + resourceNodes := map[string][]GraphNodeResource{} + for _, node := range g.Vertices() { + rn, ok := node.(GraphNodeResource) + if !ok { + continue + } + // We ignore any instances that _also_ implement + // GraphNodeResourceInstance, since in the unlikely event that they + // do exist we'd probably end up creating cycles by connecting them. + if _, ok := node.(GraphNodeResourceInstance); ok { + continue + } + + addr := rn.ResourceAddr().String() + resourceNodes[addr] = append(resourceNodes[addr], rn) + } + + for _, rc := range changes.Resources { + addr := rc.Addr + dk := rc.DeposedKey + + log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) + + // Depending on the action we'll need some different combinations of + // nodes, because destroying uses a special node type separate from + // other actions. 
+ var update, delete, createBeforeDestroy bool + switch rc.Action { + case plans.NoOp: + continue + case plans.Delete: + delete = true + case plans.DeleteThenCreate, plans.CreateThenDelete: + update = true + delete = true + createBeforeDestroy = (rc.Action == plans.CreateThenDelete) + default: + update = true + } + + if dk != states.NotDeposed && update { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change for deposed object", + fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), + )) + continue + } + + // If we're going to do a create_before_destroy Replace operation then + // we need to allocate a DeposedKey to use to retain the + // not-yet-destroyed prior object, so that the delete node can destroy + // _that_ rather than the newly-created node, which will be current + // by the time the delete node is visited. + if update && delete && createBeforeDestroy { + // In this case, variable dk will be the _pre-assigned_ DeposedKey + // that must be used if the update graph node deposes the current + // instance, which will then align with the same key we pass + // into the destroy node to ensure we destroy exactly the deposed + // object we expect. + if state != nil { + ris := state.ResourceInstance(addr) + if ris == nil { + // Should never happen, since we don't plan to replace an + // instance that doesn't exist yet. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change", + fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), + )) + continue + } + + // Allocating a deposed key separately from using it can be racy + // in general, but we assume here that nothing except the apply + // node we instantiate below will actually make new deposed objects + // in practice, and so the set of already-used keys will not change + // between now and then. + dk = ris.FindUnusedDeposedKey() + } else { + // If we have no state at all yet then we can use _any_ + // DeposedKey. + dk = states.NewDeposedKey() + } + } + + if update { + // All actions except destroying the node type chosen by t.Concrete + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + if createBeforeDestroy { + // We'll attach our pre-allocated DeposedKey to the node if + // it supports that. NodeApplyableResourceInstance is the + // specific concrete node type we are looking for here really, + // since that's the only node type that might depose objects. + if dn, ok := node.(GraphNodeDeposer); ok { + dn.SetPreallocatedDeposedKey(dk) + } + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) + } else { + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) + } + + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + g.Connect(dag.BasicEdge(node, rsrcNode)) + } + } + + if delete { + // Destroying always uses a destroy-specific node type, though + // which one depends on whether we're destroying a current object + // or a deposed object. 
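+			// (NodeDestroyResourceInstance is used for a current object and
+			// NodeDestroyDeposedResourceInstanceObject for a deposed one.)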
+ var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeDestroyResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) + } else { + node = &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } + if dk == states.NotDeposed { + log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) + } else { + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) + } + g.Add(node) + } + + } + + log.Printf("[TRACE] DiffTransformer complete") + + return diags.Err() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go new file mode 100644 index 000000000..03eac685e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_expand.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeExapndable is an interface that nodes can implement to +// signal that they can be expanded. Expanded nodes turn into +// GraphNodeSubgraph nodes within the graph. +type GraphNodeExpandable interface { + Expand(GraphBuilder) (GraphNodeSubgraph, error) +} + +// GraphNodeDynamicExpandable is an interface that nodes can implement +// to signal that they can be expanded at eval-time (hence dynamic). +// These nodes are given the eval context and are expected to return +// a new subgraph. +type GraphNodeDynamicExpandable interface { + DynamicExpand(EvalContext) (*Graph, error) +} + +// GraphNodeSubgraph is an interface a node can implement if it has +// a larger subgraph that should be walked. +type GraphNodeSubgraph interface { + Subgraph() dag.Grapher +} + +// ExpandTransform is a transformer that does a subgraph expansion +// at graph transform time (vs. at eval time). The benefit of earlier +// subgraph expansion is that errors with the graph build can be detected +// at an earlier stage. +type ExpandTransform struct { + Builder GraphBuilder +} + +func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { + ev, ok := v.(GraphNodeExpandable) + if !ok { + // This isn't an expandable vertex, so just ignore it. + return v, nil + } + + // Expand the subgraph! + log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) + return ev.Expand(t.Builder) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go new file mode 100644 index 000000000..2ce23ddbe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_provider.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportProviderValidateTransformer is a GraphTransformer that goes through +// the providers in the graph and validates that they only depend on variables. 
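+//
+// For example (hypothetical configuration), a provider block whose argument
+// refers to an attribute of another resource would be rejected during import,
+// while one that uses only literal values or references such as var.region
+// is accepted.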
+type ImportProviderValidateTransformer struct{} + +func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + var diags tfdiags.Diagnostics + + for _, v := range g.Vertices() { + // We only care about providers + pv, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // We only care about providers that reference things + rn, ok := pv.(GraphNodeReferencer) + if !ok { + continue + } + + for _, ref := range rn.References() { + if _, ok := ref.Subject.(addrs.InputVariable); !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider dependency for import", + Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } + } + + return diags.Err() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go new file mode 100644 index 000000000..7dd2c4876 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_import_state.go @@ -0,0 +1,239 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/providers" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// ImportStateTransformer is a GraphTransformer that adds nodes to the +// graph to represent the imports we want to do for resources. +type ImportStateTransformer struct { + Targets []*ImportTarget +} + +func (t *ImportStateTransformer) Transform(g *Graph) error { + for _, target := range t.Targets { + // The ProviderAddr may not be supplied for non-aliased providers. + // This will be populated if the targets come from the cli, but tests + // may not specify implied provider addresses. + providerAddr := target.ProviderAddr + if providerAddr.ProviderConfig.Type == "" { + providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) + } + + node := &graphNodeImportState{ + Addr: target.Addr, + ID: target.ID, + ProviderAddr: providerAddr, + } + g.Add(node) + } + return nil +} + +type graphNodeImportState struct { + Addr addrs.AbsResourceInstance // Addr is the resource address to import into + ID string // ID is the ID to import as + ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type + ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution + + states []providers.ImportedResource +} + +var ( + _ GraphNodeSubPath = (*graphNodeImportState)(nil) + _ GraphNodeEvalable = (*graphNodeImportState)(nil) + _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) + _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) +) + +func (n *graphNodeImportState) Name() string { + return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) +} + +// GraphNodeProviderConsumer +func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // We assume that n.ProviderAddr has been properly populated here. + // It's the responsibility of the code creating a graphNodeImportState + // to populate this, possibly by calling DefaultProviderConfig() on the + // resource address to infer an implied provider from the resource type + // name. 
+ return n.ProviderAddr, false +} + +// GraphNodeProviderConsumer +func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { + n.ResolvedProvider = addr +} + +// GraphNodeSubPath +func (n *graphNodeImportState) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable impl. +func (n *graphNodeImportState) EvalTree() EvalNode { + var provider providers.Interface + + // Reset our states + n.states = nil + + // Return our sequence + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + }, + &EvalImportState{ + Addr: n.Addr.Resource, + Provider: &provider, + ID: n.ID, + Output: &n.states, + }, + }, + } +} + +// GraphNodeDynamicExpandable impl. +// +// We use DynamicExpand as a way to generate the subgraph of refreshes +// and state inserts we need to do for our import state. Since they're new +// resources they don't depend on anything else and refreshes are isolated +// so this is nearly a perfect use case for dynamic expand. +func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + g := &Graph{Path: ctx.Path()} + + // nameCounter is used to de-dup names in the state. + nameCounter := make(map[string]int) + + // Compile the list of addresses that we'll be inserting into the state. + // We do this ahead of time so we can verify that we aren't importing + // something that already exists. + addrs := make([]addrs.AbsResourceInstance, len(n.states)) + for i, state := range n.states { + addr := n.Addr + if t := state.TypeName; t != "" { + addr.Resource.Resource.Type = t + } + + // Determine if we need to suffix the name to de-dup + key := addr.String() + count, ok := nameCounter[key] + if ok { + count++ + addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) + } + nameCounter[key] = count + + // Add it to our list + addrs[i] = addr + } + + // Verify that all the addresses are clear + state := ctx.State() + for _, addr := range addrs { + existing := state.ResourceInstance(addr) + if existing != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource already managed by Terraform", + fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), + )) + continue + } + } + if diags.HasErrors() { + // Bail out early, then. + return nil, diags.Err() + } + + // For each of the states, we add a node to handle the refresh/add to state. + // "n.states" is populated by our own EvalTree with the result of + // ImportState. Since DynamicExpand is always called after EvalTree, this + // is safe. + for i, state := range n.states { + g.Add(&graphNodeImportStateSub{ + TargetAddr: addrs[i], + State: state, + ResolvedProvider: n.ResolvedProvider, + }) + } + + // Root transform for a single root + t := &RootTransformer{} + if err := t.Transform(g); err != nil { + return nil, err + } + + // Done! + return g, diags.Err() +} + +// graphNodeImportStateSub is the sub-node of graphNodeImportState +// and is part of the subgraph. This node is responsible for refreshing +// and adding a resource to the state once it is imported. 
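+//
+// Its EvalTree fetches the resolved provider, refreshes the imported object,
+// verifies the result, and finally writes it to the state.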
+type graphNodeImportStateSub struct { + TargetAddr addrs.AbsResourceInstance + State providers.ImportedResource + ResolvedProvider addrs.AbsProviderConfig +} + +var ( + _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) + _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) +) + +func (n *graphNodeImportStateSub) Name() string { + return fmt.Sprintf("import %s result", n.TargetAddr) +} + +func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { + return n.TargetAddr.Module +} + +// GraphNodeEvalable impl. +func (n *graphNodeImportStateSub) EvalTree() EvalNode { + // If the Ephemeral type isn't set, then it is an error + if n.State.TypeName == "" { + err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) + return &EvalReturnError{Error: &err} + } + + state := n.State.AsInstanceObject() + + var provider providers.Interface + var providerSchema *ProviderSchema + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalRefresh{ + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalImportStateVerify{ + Addr: n.TargetAddr.Resource, + State: &state, + }, + &EvalWriteState{ + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go new file mode 100644 index 000000000..b97dea2ab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_local.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. +type LocalTransformer struct { + Config *configs.Config +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Config) +} + +func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { + if c == nil { + // Can't have any locals if there's no config + return nil + } + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. 
+ path := c.Path.UnkeyedInstanceShim() + + for _, local := range c.Module.Locals { + addr := path.LocalValue(local.Name) + node := &NodeLocal{ + Addr: addr, + Config: local, + } + g.Add(node) + } + + // Also populate locals for child modules + for _, cc := range c.Children { + if err := t.transformModule(g, cc); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go new file mode 100644 index 000000000..caa4b6a63 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_module_variable.go @@ -0,0 +1,126 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// ModuleVariableTransformer is a GraphTransformer that adds all the variables +// in the configuration to the graph. +// +// Any "variable" block present in any non-root module is included here, even +// if a particular variable is not referenced from anywhere. +// +// The transform will produce errors if a call to a module does not conform +// to the expected set of arguments, but this transformer is not in a good +// position to return errors and so the validate walk should include specific +// steps for validating module blocks, separate from this transform. +type ModuleVariableTransformer struct { + Config *configs.Config +} + +func (t *ModuleVariableTransformer) Transform(g *Graph) error { + return t.transform(g, nil, t.Config) +} + +func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { + // We can have no variables if we have no configuration. + if c == nil { + return nil + } + + // Transform all the children first. + for _, cc := range c.Children { + if err := t.transform(g, c, cc); err != nil { + return err + } + } + + // If we're processing anything other than the root module then we'll + // add graph nodes for variables defined inside. (Variables for the root + // module are dealt with in RootVariableTransformer). + // If we have a parent, we can determine if a module variable is being + // used, so we transform this. + if parent != nil { + if err := t.transformSingle(g, parent, c); err != nil { + return err + } + } + + return nil +} + +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + _, call := path.Call() + + // Find the call in the parent module configuration, so we can get the + // expressions given for each input variable at the call site. + callConfig, exists := parent.Module.ModuleCalls[call.Name] + if !exists { + // This should never happen, since it indicates an improperly-constructed + // configuration tree. 
+ panic(fmt.Errorf("no module call block found for %s", path)) + } + + // We need to construct a schema for the expected call arguments based on + // the configured variables in our config, which we can then use to + // decode the content of the call block. + schema := &hcl.BodySchema{} + for _, v := range c.Module.Variables { + schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ + Name: v.Name, + Required: v.Default == cty.NilVal, + }) + } + + content, contentDiags := callConfig.Config.Content(schema) + if contentDiags.HasErrors() { + // Validation code elsewhere should deal with any errors before we + // get in here, but we'll report them out here just in case, to + // avoid crashes. + var diags tfdiags.Diagnostics + diags = diags.Append(contentDiags) + return diags.Err() + } + + for _, v := range c.Module.Variables { + var expr hcl.Expression + if attr := content.Attributes[v.Name]; attr != nil { + expr = attr.Expr + } else { + // No expression provided for this variable, so we'll make a + // synthetic one using the variable's default value. + expr = &hclsyntax.LiteralValueExpr{ + Val: v.Default, + SrcRange: v.DeclRange, // This is not exact, but close enough + } + } + + // For now we treat all module variables as "applyable", even though + // such nodes are valid to use on other walks too. We may specialize + // this in future if we find reasons to employ different behaviors + // in different scenarios. + node := &NodeApplyableModuleVariable{ + Addr: path.InputVariable(v.Name), + Config: v, + Expr: expr, + } + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go new file mode 100644 index 000000000..4d1323fb0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_count.go @@ -0,0 +1,175 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" + "github.com/zclconf/go-cty/cty" +) + +// OrphanResourceCountTransformer is a GraphTransformer that adds orphans +// for an expanded count to the graph. The determination of this depends +// on the count argument given. +// +// Orphans are found by comparing the count to what is found in the state. +// This transform assumes that if an element in the state is within the count +// bounds given, that it is not an orphan. +type OrphanResourceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + Count int // Actual count of the resource, or -1 if count is not set at all + ForEach map[string]cty.Value // The ForEach map on the resource + Addr addrs.AbsResource // Addr of the resource to look for orphans + State *states.State // Full global state +} + +func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { + rs := t.State.Resource(t.Addr) + if rs == nil { + return nil // Resource doesn't exist in state, so nothing to do! 
+ } + + haveKeys := make(map[addrs.InstanceKey]struct{}) + for key := range rs.Instances { + haveKeys[key] = struct{}{} + } + + // if for_each is set, use that transformer + if t.ForEach != nil { + return t.transformForEach(haveKeys, g) + } + if t.Count < 0 { + return t.transformNoCount(haveKeys, g) + } + if t.Count == 0 { + return t.transformZeroCount(haveKeys, g) + } + return t.transformCount(haveKeys, g) +} + +func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // If there is a NoKey node, add this to the graph first, + // so that we can create edges to it in subsequent (StringKey) nodes. + // This is because the last item determines the resource mode for the whole resource, + // (see SetResourceInstanceCurrent for more information) and we need to evaluate + // an orphaned (NoKey) resource before the in-memory state is updated + // to deal with a new for_each resource + _, hasNoKeyNode := haveKeys[addrs.NoKey] + var noKeyNode dag.Vertex + if hasNoKeyNode { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(addrs.NoKey)) + noKeyNode = abstract + if f := t.Concrete; f != nil { + noKeyNode = f(abstract) + } + g.Add(noKeyNode) + } + + for key := range haveKeys { + // If the key is no-key, we have already added it, so skip + if key == addrs.NoKey { + continue + } + + s, _ := key.(addrs.StringKey) + // If the key is present in our current for_each, carry on + if _, ok := t.ForEach[string(s)]; ok { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + + // Add edge to noKeyNode if it exists + if hasNoKeyNode { + g.Connect(dag.BasicEdge(node, noKeyNode)) + } + } + return nil +} + +func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Due to the logic in Transform, we only get in here if our count is + // at least one. + + _, have0Key := haveKeys[addrs.IntKey(0)] + + for key := range haveKeys { + if key == addrs.NoKey && !have0Key { + // If we have no 0-key then we will accept a no-key instance + // as an alias for it. + continue + } + + i, isInt := key.(addrs.IntKey) + if isInt && int(i) < t.Count { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} + +func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // This case is easy: we need to orphan any keys we have at all. + + for key := range haveKeys { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} + +func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Negative count indicates that count is not set at all, in which + // case we expect to have a single instance with no key set at all. 
+ // However, we'll also accept an instance with key 0 set as an alias + // for it, in case the user has just deleted the "count" argument and + // so wants to keep the first instance in the set. + + _, haveNoKey := haveKeys[addrs.NoKey] + _, have0Key := haveKeys[addrs.IntKey(0)] + keepKey := addrs.NoKey + if have0Key && !haveNoKey { + // If we don't have a no-key instance then we can use the 0-key instance + // instead. + keepKey = addrs.IntKey(0) + } + + for key := range haveKeys { + if key == keepKey { + continue + } + + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go new file mode 100644 index 000000000..cab10da12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_output.go @@ -0,0 +1,60 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// OrphanOutputTransformer finds the outputs that aren't present +// in the given config that are in the state and adds them to the graph +// for deletion. +type OrphanOutputTransformer struct { + Config *configs.Config // Root of config tree + State *states.State // State is the root state +} + +func (t *OrphanOutputTransformer) Transform(g *Graph) error { + if t.State == nil { + log.Printf("[DEBUG] No state, no orphan outputs") + return nil + } + + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + return nil +} + +func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the config for this path, which is nil if the entire module has been + // removed. + var outputs map[string]*configs.Output + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + outputs = c.Module.Outputs + } + + // An output is "orphaned" if it's present in the state but not declared + // in the configuration. + for name := range ms.OutputValues { + if _, exists := outputs[name]; exists { + continue + } + + g.Add(&NodeOutputOrphan{ + Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr), + }) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go new file mode 100644 index 000000000..f927b1086 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_orphan_resource.go @@ -0,0 +1,179 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned +// resource instances to the graph. An "orphan" is an instance that is present +// in the state but belongs to a resource that is no longer present in the +// configuration. 
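+//
+// For example, if the state still tracks instances of null_resource.example
+// but that resource block has been removed from the module, each remaining
+// instance is added here so that it can be planned for destruction.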
+// +// This is not the transformer that deals with "count orphans" (instances that +// are no longer covered by a resource's "count" or "for_each" setting); that's +// handled instead by OrphanResourceCountTransformer. +type OrphanResourceInstanceTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + + // State is the global state. We require the global state to + // properly find module orphans at our path. + State *states.State + + // Config is the root node in the configuration tree. We'll look up + // the appropriate note in this tree using the path in each node. + Config *configs.Config +} + +func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceInstanceTransformer used without setting Config") + } + + // Go through the modules and for each module transform in order + // to add the orphan. + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + + return nil +} + +func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { + if ms == nil { + return nil + } + + moduleAddr := ms.Addr + + // Get the configuration for this module. The configuration might be + // nil if the module was removed from the configuration. This is okay, + // this just means that every resource is an orphan. + var m *configs.Module + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + m = c.Module + } + + // An "orphan" is a resource that is in the state but not the configuration, + // so we'll walk the state resources and try to correlate each of them + // with a configuration block. Each orphan gets a node in the graph whose + // type is decided by t.Concrete. + // + // We don't handle orphans related to changes in the "count" and "for_each" + // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. + for _, rs := range ms.Resources { + if m != nil { + if r := m.ResourceByAddr(rs.Addr); r != nil { + continue + } + } + + for key := range rs.Instances { + addr := rs.Addr.Instance(key).Absolute(moduleAddr) + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) + g.Add(node) + } + } + + return nil +} + +// OrphanResourceTransformer is a GraphTransformer that adds orphaned +// resources to the graph. An "orphan" is a resource that is present in +// the state but no longer present in the config. +// +// This is separate to OrphanResourceInstanceTransformer in that it deals with +// whole resources, rather than individual instances of resources. Orphan +// resource nodes are only used during apply to clean up leftover empty +// resource state skeletons, after all of the instances inside have been +// removed. +// +// This transformer will also create edges in the graph to any pre-existing +// node that creates or destroys the entire orphaned resource or any of its +// instances, to ensure that the "orphan-ness" of a resource is always dealt +// with after all other aspects of it. +type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. 
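+	// Resources recorded here that no longer appear anywhere in the
+	// configuration are treated as whole-resource orphans.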
+ State *states.State + + // Config is the root node in the configuration tree. + Config *configs.Config +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceTransformer used without setting Config") + } + + // We'll first collect up the existing nodes for each resource so we can + // create dependency edges for any new nodes we create. + deps := map[string][]dag.Vertex{} + for _, v := range g.Vertices() { + switch tv := v.(type) { + case GraphNodeResourceInstance: + k := tv.ResourceInstanceAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + case GraphNodeResource: + k := tv.ResourceAddr().String() + deps[k] = append(deps[k], v) + case GraphNodeDestroyer: + k := tv.DestroyAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + } + } + + for _, ms := range t.State.Modules { + moduleAddr := ms.Addr + + mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed + + for _, rs := range ms.Resources { + if mc != nil { + if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { + // It's in the config, so nothing to do for this one. + continue + } + } + + addr := rs.Addr.Absolute(moduleAddr) + abstract := NewNodeAbstractResource(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) + g.Add(node) + for _, dn := range deps[addr.String()] { + log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) + g.Connect(dag.BasicEdge(node, dn)) + } + } + } + + return nil + +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go new file mode 100644 index 000000000..e2979ac5c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_output.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// OutputTransformer is a GraphTransformer that adds all the outputs +// in the configuration to the graph. +// +// This is done for the apply graph builder even if dependent nodes +// aren't changing since there is no downside: the state will be available +// even if the dependent items aren't changing. +type OutputTransformer struct { + Config *configs.Config +} + +func (t *OutputTransformer) Transform(g *Graph) error { + return t.transform(g, t.Config) +} + +func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { + // If we have no config then there can be no outputs. + if c == nil { + return nil + } + + // Transform all the children. We must do this first because + // we can reference module outputs and they must show up in the + // reference map. 
+ for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { + return err + } + } + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + + for _, o := range c.Module.Outputs { + addr := path.OutputValue(o.Name) + node := &NodeApplyableOutput{ + Addr: addr, + Config: o, + } + g.Add(node) + } + + return nil +} + +// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete +// outputs during destroy. We need to do this to ensure that no stale outputs +// are ever left in the state. +type DestroyOutputTransformer struct { +} + +func (t *DestroyOutputTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + output, ok := v.(*NodeApplyableOutput) + if !ok { + continue + } + + // create the destroy node for this output + node := &NodeDestroyableOutput{ + Addr: output.Addr, + Config: output.Config, + } + + log.Printf("[TRACE] creating %s", node.Name()) + g.Add(node) + + deps, err := g.Descendents(v) + if err != nil { + return err + } + + // the destroy node must depend on the eval node + deps.Add(v) + + for _, d := range deps.List() { + log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) + g.Connect(dag.BasicEdge(node, d)) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go new file mode 100644 index 000000000..9c8966fac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provider.go @@ -0,0 +1,705 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { + return GraphTransformMulti( + // Add providers from the config + &ProviderConfigTransformer{ + Config: config, + Providers: providers, + Concrete: concrete, + }, + // Add any remaining missing providers + &MissingProviderTransformer{ + Providers: providers, + Concrete: concrete, + }, + // Connect the providers + &ProviderTransformer{ + Config: config, + }, + // Remove unused providers and proxies + &PruneProviderTransformer{}, + // Connect provider to their parent provider nodes + &ParentProviderTransformer{}, + ) +} + +// GraphNodeProvider is an interface that nodes that can be a provider +// must implement. +// +// ProviderAddr returns the address of the provider configuration this +// satisfies, which is relative to the path returned by method Path(). +// +// Name returns the full name of the provider in the config. +type GraphNodeProvider interface { + GraphNodeSubPath + ProviderAddr() addrs.AbsProviderConfig + Name() string +} + +// GraphNodeCloseProvider is an interface that nodes that can be a close +// provider must implement. 
The CloseProviderName returned is the name of +// the provider they satisfy. +type GraphNodeCloseProvider interface { + GraphNodeSubPath + CloseProviderAddr() addrs.AbsProviderConfig +} + +// GraphNodeProviderConsumer is an interface that nodes that require +// a provider must implement. ProvidedBy must return the address of the provider +// to use, which will be resolved to a configuration either in the same module +// or in an ancestor module, with the resulting absolute address passed to +// SetProvider. +type GraphNodeProviderConsumer interface { + // ProvidedBy returns the address of the provider configuration the node + // refers to. If the returned "exact" value is true, this address will + // be taken exactly. If "exact" is false, a provider configuration from + // an ancestor module may be selected instead. + ProvidedBy() (addr addrs.AbsProviderConfig, exact bool) + // Set the resolved provider address for this resource. + SetProvider(addrs.AbsProviderConfig) +} + +// ProviderTransformer is a GraphTransformer that maps resources to +// providers within the graph. This will error if there are any resources +// that don't map to proper resources. +type ProviderTransformer struct { + Config *configs.Config +} + +func (t *ProviderTransformer) Transform(g *Graph) error { + // We need to find a provider configuration address for each resource + // either directly represented by a node or referenced by a node in + // the graph, and then create graph edges from provider to provider user + // so that the providers will get initialized first. + + var diags tfdiags.Diagnostics + + // To start, we'll collect the _requested_ provider addresses for each + // node, which we'll then resolve (handling provider inheritence, etc) in + // the next step. + // Our "requested" map is from graph vertices to string representations of + // provider config addresses (for deduping) to requests. + type ProviderRequest struct { + Addr addrs.AbsProviderConfig + Exact bool // If true, inheritence from parent modules is not attempted + } + requested := map[dag.Vertex]map[string]ProviderRequest{} + needConfigured := map[string]addrs.AbsProviderConfig{} + for _, v := range g.Vertices() { + + // Does the vertex _directly_ use a provider? + if pv, ok := v.(GraphNodeProviderConsumer); ok { + requested[v] = make(map[string]ProviderRequest) + + p, exact := pv.ProvidedBy() + if exact { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p) + } else { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p) + } + + requested[v][p.String()] = ProviderRequest{ + Addr: p, + Exact: exact, + } + + // Direct references need the provider configured as well as initialized + needConfigured[p.String()] = p + } + } + + // Now we'll go through all the requested addresses we just collected and + // figure out which _actual_ config address each belongs to, after resolving + // for provider inheritance and passing. 
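+	// For example, a resource in a child module that has no provider
+	// configuration of its own in that module is served by the closest
+	// configuration of the same provider in an ancestor module, found below
+	// by walking up the module path via Inherited().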
+ m := providerVertexMap(g) + for v, reqs := range requested { + for key, req := range reqs { + p := req.Addr + target := m[key] + + _, ok := v.(GraphNodeSubPath) + if !ok && target == nil { + // No target and no path to traverse up from + diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) + continue + } + + if target != nil { + log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) + } + + // if we don't have a provider at this level, walk up the path looking for one, + // unless we were told to be exact. + if target == nil && !req.Exact { + for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { + key := pp.String() + target = m[key] + if target != nil { + log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) + break + } + log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) + } + } + + // If this provider doesn't need to be configured then we can just + // stub it out with an init-only provider node, which will just + // start up the provider and fetch its schema. + if _, exists := needConfigured[key]; target == nil && !exists { + stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) + stub := &NodeEvalableProvider{ + &NodeAbstractProvider{ + Addr: stubAddr, + }, + } + m[stubAddr.String()] = stub + log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) + target = stub + g.Add(target) + } + + if target == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider configuration not present", + fmt.Sprintf( + "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", + dag.VertexName(v), p, dag.VertexName(v), + ), + )) + break + } + + // see if this in an inherited provider + if p, ok := target.(*graphNodeProxyProvider); ok { + g.Remove(p) + target = p.Target() + key = target.(GraphNodeProvider).ProviderAddr().String() + } + + log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) + if pv, ok := v.(GraphNodeProviderConsumer); ok { + pv.SetProvider(target.ProviderAddr()) + } + g.Connect(dag.BasicEdge(v, target)) + } + } + + return diags.Err() +} + +// CloseProviderTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provider connections that aren't needed anymore. +// A provider connection is not needed anymore once all depended resources +// in the graph are evaluated. +type CloseProviderTransformer struct{} + +func (t *CloseProviderTransformer) Transform(g *Graph) error { + pm := providerVertexMap(g) + cpm := make(map[string]*graphNodeCloseProvider) + var err error + + for _, v := range pm { + p := v.(GraphNodeProvider) + key := p.ProviderAddr().String() + + // get the close provider of this type if we alread created it + closer := cpm[key] + + if closer == nil { + // create a closer for this provider type + closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} + g.Add(closer) + cpm[key] = closer + } + + // Close node depends on the provider itself + // this is added unconditionally, so it will connect to all instances + // of the provider. Extra edges will be removed by transitive + // reduction. 
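+		// (An edge from the closer to another vertex means the closer depends
+		// on that vertex, so the closer runs only after the provider and all
+		// of its consumers have been evaluated.)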
+ g.Connect(dag.BasicEdge(closer, p)) + + // connect all the provider's resources to the close node + for _, s := range g.UpEdges(p).List() { + if _, ok := s.(GraphNodeProviderConsumer); ok { + g.Connect(dag.BasicEdge(closer, s)) + } + } + } + + return err +} + +// MissingProviderTransformer is a GraphTransformer that adds to the graph +// a node for each default provider configuration that is referenced by another +// node but not already present in the graph. +// +// These "default" nodes are always added to the root module, regardless of +// where they are requested. This is important because our inheritance +// resolution behavior in ProviderTransformer will then treat these as a +// last-ditch fallback after walking up the tree, rather than preferring them +// as it would if they were placed in the same module as the requester. +// +// This transformer may create extra nodes that are not needed in practice, +// due to overriding provider configurations in child modules. +// PruneProviderTransformer can then remove these once ProviderTransformer +// has resolved all of the inheritence, etc. +type MissingProviderTransformer struct { + // Providers is the list of providers we support. + Providers []string + + // Concrete, if set, overrides how the providers are made. + Concrete ConcreteProviderNodeFunc +} + +func (t *MissingProviderTransformer) Transform(g *Graph) error { + // Initialize factory + if t.Concrete == nil { + t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { + return a + } + } + + var err error + m := providerVertexMap(g) + for _, v := range g.Vertices() { + pv, ok := v.(GraphNodeProviderConsumer) + if !ok { + continue + } + + // For our work here we actually care only about the provider type and + // we plan to place all default providers in the root module, and so + // it's safe for us to rely on ProvidedBy here rather than waiting for + // the later proper resolution of provider inheritance done by + // ProviderTransformer. + p, _ := pv.ProvidedBy() + if p.ProviderConfig.Alias != "" { + // We do not create default aliased configurations. + log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) + continue + } + + // We're going to create an implicit _default_ configuration for the + // referenced provider type in the _root_ module, ignoring all other + // aspects of the resource's declared provider address. + defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type) + key := defaultAddr.String() + provider := m[key] + + if provider != nil { + // There's already an explicit default configuration for this + // provider type in the root module, so we have nothing to do. + continue + } + + log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) + + // create the missing top-level provider + provider = t.Concrete(&NodeAbstractProvider{ + Addr: defaultAddr, + }).(GraphNodeProvider) + + g.Add(provider) + m[key] = provider + } + + return err +} + +// ParentProviderTransformer connects provider nodes to their parents. +// +// This works by finding nodes that are both GraphNodeProviders and +// GraphNodeSubPath. It then connects the providers to their parent +// path. The parent provider is always at the root level. 
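+// (More precisely, each provider node is connected to the matching
+// configuration one module level up, as reported by Inherited(); following
+// those edges eventually leads back to the root module.)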
+type ParentProviderTransformer struct{} + +func (t *ParentProviderTransformer) Transform(g *Graph) error { + pm := providerVertexMap(g) + for _, v := range g.Vertices() { + // Only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // Also require non-empty path, since otherwise we're in the root + // module and so cannot have a parent. + if len(pn.Path()) <= 1 { + continue + } + + // this provider may be disabled, but we can only get it's name from + // the ProviderName string + addr := pn.ProviderAddr() + parentAddr, ok := addr.Inherited() + if ok { + parent := pm[parentAddr.String()] + if parent != nil { + g.Connect(dag.BasicEdge(v, parent)) + } + } + } + return nil +} + +// PruneProviderTransformer removes any providers that are not actually used by +// anything, and provider proxies. This avoids the provider being initialized +// and configured. This both saves resources but also avoids errors since +// configuration may imply initialization which may require auth. +type PruneProviderTransformer struct{} + +func (t *PruneProviderTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about providers + _, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // ProxyProviders will have up edges, but we're now done with them in the graph + if _, ok := v.(*graphNodeProxyProvider); ok { + log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) + g.Remove(v) + } + + // Remove providers with no dependencies. + if g.UpEdges(v).Len() == 0 { + log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) + g.Remove(v) + } + } + + return nil +} + +func providerVertexMap(g *Graph) map[string]GraphNodeProvider { + m := make(map[string]GraphNodeProvider) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvider); ok { + addr := pv.ProviderAddr() + m[addr.String()] = pv + } + } + + return m +} + +type graphNodeCloseProvider struct { + Addr addrs.AbsProviderConfig +} + +var ( + _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) +) + +func (n *graphNodeCloseProvider) Name() string { + return n.Addr.String() + " (close)" +} + +// GraphNodeSubPath impl. +func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// GraphNodeEvalable impl. +func (n *graphNodeCloseProvider) EvalTree() EvalNode { + return CloseProviderEvalTree(n.Addr) +} + +// GraphNodeDependable impl. +func (n *graphNodeCloseProvider) DependableName() []string { + return []string{n.Name()} +} + +func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { + return n.Addr +} + +// GraphNodeDotter impl. +func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + if !opts.Verbose { + return nil + } + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} + +// RemovableIfNotTargeted +func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to +// store the name and value of a provider node for inheritance between modules. +// These nodes are only used to store the data while loading the provider +// configurations, and are removed after all the resources have been connected +// to their providers. 
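+//
+// A proxy records which provider node a module call's "providers" argument
+// points at; Target() follows chains of proxies until it reaches a concrete
+// configuration node.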
+type graphNodeProxyProvider struct { + addr addrs.AbsProviderConfig + target GraphNodeProvider +} + +var ( + _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) +) + +func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.addr +} + +func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { + return n.addr.Module +} + +func (n *graphNodeProxyProvider) Name() string { + return n.addr.String() + " (proxy)" +} + +// find the concrete provider instance +func (n *graphNodeProxyProvider) Target() GraphNodeProvider { + switch t := n.target.(type) { + case *graphNodeProxyProvider: + return t.Target() + default: + return n.target + } +} + +// ProviderConfigTransformer adds all provider nodes from the configuration and +// attaches the configs. +type ProviderConfigTransformer struct { + Providers []string + Concrete ConcreteProviderNodeFunc + + // each provider node is stored here so that the proxy nodes can look up + // their targets by name. + providers map[string]GraphNodeProvider + // record providers that can be overriden with a proxy + proxiable map[string]bool + + // Config is the root node of the configuration tree to add providers from. + Config *configs.Config +} + +func (t *ProviderConfigTransformer) Transform(g *Graph) error { + // If no configuration is given, we don't do anything + if t.Config == nil { + return nil + } + + t.providers = make(map[string]GraphNodeProvider) + t.proxiable = make(map[string]bool) + + // Start the transformation process + if err := t.transform(g, t.Config); err != nil { + return err + } + + // finally attach the configs to the new nodes + return t.attachProviderConfigs(g) +} + +func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { + // If no config, do nothing + if c == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, c); err != nil { + return err + } + + // Transform all the children. + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { + return err + } + } + return nil +} + +func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { + // Get the module associated with this configuration tree node + mod := c.Module + staticPath := c.Path + + // We actually need a dynamic module path here, but we've not yet updated + // our graph builders enough to support expansion of module calls with + // "count" and "for_each" set, so for now we'll shim this by converting to + // a dynamic path with no keys. At the time of writing this is the only + // possible kind of dynamic path anyway. + path := make(addrs.ModuleInstance, len(staticPath)) + for i, name := range staticPath { + path[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + + // add all providers from the configuration + for _, p := range mod.ProviderConfigs { + relAddr := p.Addr() + addr := relAddr.Absolute(path) + + abstract := &NodeAbstractProvider{ + Addr: addr, + } + var v dag.Vertex + if t.Concrete != nil { + v = t.Concrete(abstract) + } else { + v = abstract + } + + // Add it to the graph + g.Add(v) + key := addr.String() + t.providers[key] = v.(GraphNodeProvider) + + // A provider configuration is "proxyable" if its configuration is + // entirely empty. This means it's standing in for a provider + // configuration that must be passed in from the parent module. + // We decide this by evaluating the config with an empty schema; + // if this succeeds, then we know there's nothing in the body. 
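+		// For example, an empty provider "aws" {} block in a child module is
+		// proxyable, while one that configures provider-specific arguments
+		// (for example a region) is not, because decoding a non-empty body
+		// against an empty schema produces errors.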
+ _, diags := p.Config.Content(&hcl.BodySchema{}) + t.proxiable[key] = !diags.HasErrors() + } + + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, c) +} + +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { + path := c.Path + + // can't add proxies at the root + if len(path) == 0 { + return nil + } + + parentPath, callAddr := path.Call() + parent := c.Parent + if parent == nil { + return nil + } + + callName := callAddr.Name + var parentCfg *configs.ModuleCall + for name, mod := range parent.Module.ModuleCalls { + if name == callName { + parentCfg = mod + break + } + } + + // We currently don't support count/for_each for modules and so we must + // shim our path and parentPath into module instances here so that the + // rest of Terraform can behave as if we do. This shimming should be + // removed later as part of implementing count/for_each for modules. + instPath := make(addrs.ModuleInstance, len(path)) + for i, name := range path { + instPath[i] = addrs.ModuleInstanceStep{Name: name} + } + parentInstPath := make(addrs.ModuleInstance, len(parentPath)) + for i, name := range parentPath { + parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} + } + + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", c.Path.String()) + } + + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. + for _, pair := range parentCfg.Providers { + fullAddr := pair.InChild.Addr().Absolute(instPath) + fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) + fullName := fullAddr.String() + fullParentName := fullParentAddr.String() + + parentProvider := t.providers[fullParentName] + + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } + + proxy := &graphNodeProxyProvider{ + addr: fullAddr, + target: parentProvider, + } + + concreteProvider := t.providers[fullName] + + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } + + // aliased configurations can't be implicitly passed in + if fullAddr.ProviderConfig.Alias != "" { + continue + } + + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. + g.Add(proxy) + t.providers[fullName] = proxy + } + return nil +} + +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + addr := apn.ProviderAddr() + + // Get the configuration. 
+ mc := t.Config.DescendentForInstance(addr.Module) + if mc == nil { + log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) + continue + } + + // Go through the provider configs to find the matching config + for _, p := range mc.Module.ProviderConfigs { + if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { + log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) + apn.AttachProvider(p) + break + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go new file mode 100644 index 000000000..e6fe25dac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_provisioner.go @@ -0,0 +1,205 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeProvisioner is an interface that nodes that can be a provisioner +// must implement. The ProvisionerName returned is the name of the provisioner +// they satisfy. +type GraphNodeProvisioner interface { + ProvisionerName() string +} + +// GraphNodeCloseProvisioner is an interface that nodes that can be a close +// provisioner must implement. The CloseProvisionerName returned is the name +// of the provisioner they satisfy. +type GraphNodeCloseProvisioner interface { + CloseProvisionerName() string +} + +// GraphNodeProvisionerConsumer is an interface that nodes that require +// a provisioner must implement. ProvisionedBy must return the names of the +// provisioners to use. +type GraphNodeProvisionerConsumer interface { + ProvisionedBy() []string +} + +// ProvisionerTransformer is a GraphTransformer that maps resources to +// provisioners within the graph. This will error if there are any resources +// that don't map to proper resources. +type ProvisionerTransformer struct{} + +func (t *ProvisionerTransformer) Transform(g *Graph) error { + // Go through the other nodes and match them to provisioners they need + var err error + m := provisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + key := provisionerMapKey(p, pv) + if m[key] == nil { + err = multierror.Append(err, fmt.Errorf( + "%s: provisioner %s couldn't be found", + dag.VertexName(v), p)) + continue + } + + log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) + g.Connect(dag.BasicEdge(v, m[key])) + } + } + } + + return err +} + +// MissingProvisionerTransformer is a GraphTransformer that adds nodes +// for missing provisioners into the graph. +type MissingProvisionerTransformer struct { + // Provisioners is the list of provisioners we support. + Provisioners []string +} + +func (t *MissingProvisionerTransformer) Transform(g *Graph) error { + // Create a set of our supported provisioners + supported := make(map[string]struct{}, len(t.Provisioners)) + for _, v := range t.Provisioners { + supported[v] = struct{}{} + } + + // Get the map of provisioners we already have in our graph + m := provisionerVertexMap(g) + + // Go through all the provisioner consumers and make sure we add + // that provisioner if it is missing. 
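+	// Provisioner nodes are keyed by the consuming node's module path (see
+	// provisionerMapKey), so each module that uses a provisioner gets its own
+	// node for it.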
+ for _, v := range g.Vertices() { + pv, ok := v.(GraphNodeProvisionerConsumer) + if !ok { + continue + } + + // If this node has a subpath, then we use that as a prefix + // into our map to check for an existing provider. + path := addrs.RootModuleInstance + if sp, ok := pv.(GraphNodeSubPath); ok { + path = sp.Path() + } + + for _, p := range pv.ProvisionedBy() { + // Build the key for storing in the map + key := provisionerMapKey(p, pv) + + if _, ok := m[key]; ok { + // This provisioner already exists as a configure node + continue + } + + if _, ok := supported[p]; !ok { + // If we don't support the provisioner type, we skip it. + // Validation later will catch this as an error. + continue + } + + // Build the vertex + var newV dag.Vertex = &NodeProvisioner{ + NameValue: p, + PathValue: path, + } + + // Add the missing provisioner node to the graph + m[key] = g.Add(newV) + log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v)) + } + } + + return nil +} + +// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provisioner connections that aren't needed +// anymore. A provisioner connection is not needed anymore once all depended +// resources in the graph are evaluated. +type CloseProvisionerTransformer struct{} + +func (t *CloseProvisionerTransformer) Transform(g *Graph) error { + m := closeProvisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + source := m[p] + + if source == nil { + // Create a new graphNodeCloseProvisioner and add it to the graph + source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} + g.Add(source) + + // Make sure we also add the new graphNodeCloseProvisioner to the map + // so we don't create and add any duplicate graphNodeCloseProvisioners. + m[p] = source + } + + g.Connect(dag.BasicEdge(source, v)) + } + } + } + + return nil +} + +// provisionerMapKey is a helper that gives us the key to use for the +// maps returned by things such as provisionerVertexMap. +func provisionerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + pathPrefix = sp.Path().String() + "." + } + + return pathPrefix + k +} + +func provisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisioner); ok { + key := provisionerMapKey(pv.ProvisionerName(), v) + m[key] = v + } + } + + return m +} + +func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeCloseProvisioner); ok { + m[pv.CloseProvisionerName()] = v + } + } + + return m +} + +type graphNodeCloseProvisioner struct { + ProvisionerNameValue string +} + +func (n *graphNodeCloseProvisioner) Name() string { + return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) +} + +// GraphNodeEvalable impl. 
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { + return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} +} + +func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { + return n.ProvisionerNameValue +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go new file mode 100644 index 000000000..54f9829c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_reference.go @@ -0,0 +1,446 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/hashicorp/terraform-plugin-sdk/internal/lang" +) + +// GraphNodeReferenceable must be implemented by any node that represents +// a Terraform thing that can be referenced (resource, module, etc.). +// +// Even if the thing has no name, this should return an empty list. By +// implementing this and returning a non-nil result, you say that this CAN +// be referenced and other methods of referencing may still be possible (such +// as by path!) +type GraphNodeReferenceable interface { + GraphNodeSubPath + + // ReferenceableAddrs returns a list of addresses through which this can be + // referenced. + ReferenceableAddrs() []addrs.Referenceable +} + +// GraphNodeReferencer must be implemented by nodes that reference other +// Terraform items and therefore depend on them. +type GraphNodeReferencer interface { + GraphNodeSubPath + + // References returns a list of references made by this node, which + // include both a referenced address and source location information for + // the reference. + References() []*addrs.Reference +} + +// GraphNodeReferenceOutside is an interface that can optionally be implemented. +// A node that implements it can specify that its own referenceable addresses +// and/or the addresses it references are in a different module than the +// node itself. +// +// Any referenceable addresses returned by ReferenceableAddrs are interpreted +// relative to the returned selfPath. +// +// Any references returned by References are interpreted relative to the +// returned referencePath. +// +// It is valid but not required for either of these paths to match what is +// returned by method Path, though if both match the main Path then there +// is no reason to implement this method. +// +// The primary use-case for this is the nodes representing module input +// variables, since their expressions are resolved in terms of their calling +// module, but they are still referenced from their own module. +type GraphNodeReferenceOutside interface { + // ReferenceOutside returns a path in which any references from this node + // are resolved. + ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) +} + +// ReferenceTransformer is a GraphTransformer that connects all the +// nodes that reference each other in order to form the proper ordering. 
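+//
+// For example, a resource whose configuration refers to var.region or to
+// another resource's attribute gains an edge to the node providing that
+// value, so referenced values are always evaluated first.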
+type ReferenceTransformer struct{} + +func (t *ReferenceTransformer) Transform(g *Graph) error { + // Build a reference map so we can efficiently look up the references + vs := g.Vertices() + m := NewReferenceMap(vs) + + // Find the things that reference things and connect them + for _, v := range vs { + parents, _ := m.References(v) + parentsDbg := make([]string, len(parents)) + for i, v := range parents { + parentsDbg[i] = dag.VertexName(v) + } + log.Printf( + "[DEBUG] ReferenceTransformer: %q references: %v", + dag.VertexName(v), parentsDbg) + + for _, parent := range parents { + g.Connect(dag.BasicEdge(v, parent)) + } + } + + return nil +} + +// DestroyReferenceTransformer is a GraphTransformer that reverses the edges +// for locals and outputs that depend on other nodes which will be +// removed during destroy. If a destroy node is evaluated before the local or +// output value, it will be removed from the state, and the later interpolation +// will fail. +type DestroyValueReferenceTransformer struct{} + +func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error { + vs := g.Vertices() + for _, v := range vs { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + // reverse any outgoing edges so that the value is evaluated first. + for _, e := range g.EdgesFrom(v) { + target := e.Target() + + // only destroy nodes will be evaluated in reverse + if _, ok := target.(GraphNodeDestroyer); !ok { + continue + } + + log.Printf("[TRACE] output dep: %s", dag.VertexName(target)) + + g.RemoveEdge(e) + g.Connect(&DestroyEdge{S: target, T: v}) + } + } + + return nil +} + +// PruneUnusedValuesTransformer is s GraphTransformer that removes local and +// output values which are not referenced in the graph. Since outputs and +// locals always need to be evaluated, if they reference a resource that is not +// available in the state the interpolation could fail. +type PruneUnusedValuesTransformer struct{} + +func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error { + // this might need multiple runs in order to ensure that pruning a value + // doesn't effect a previously checked value. + for removed := 0; ; removed = 0 { + for _, v := range g.Vertices() { + switch v.(type) { + case *NodeApplyableOutput, *NodeLocal: + // OK + default: + continue + } + + dependants := g.UpEdges(v) + + switch dependants.Len() { + case 0: + // nothing at all depends on this + g.Remove(v) + removed++ + case 1: + // because an output's destroy node always depends on the output, + // we need to check for the case of a single destroy node. + d := dependants.List()[0] + if _, ok := d.(*NodeDestroyableOutput); ok { + g.Remove(v) + removed++ + } + } + } + if removed == 0 { + break + } + } + + return nil +} + +// ReferenceMap is a structure that can be used to efficiently check +// for references on a graph. +type ReferenceMap struct { + // vertices is a map from internal reference keys (as produced by the + // mapKey method) to one or more vertices that are identified by each key. + // + // A particular reference key might actually identify multiple vertices, + // e.g. in situations where one object is contained inside another. + vertices map[string][]dag.Vertex + + // edges is a map whose keys are a subset of the internal reference keys + // from "vertices", and whose values are the nodes that refer to each + // key. The values in this map are the referrers, while values in + // "verticies" are the referents. The keys in both cases are referents. 
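+	//
+	// Keys have the form "<module instance path>|<referenceable address>" as
+	// produced by mapKey, e.g. "|var.region" for a root module variable or
+	// "module.child|aws_instance.web" for a resource in a child module.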
+ edges map[string][]dag.Vertex +} + +// References returns the set of vertices that the given vertex refers to, +// and any referenced addresses that do not have corresponding vertices. +func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) { + rn, ok := v.(GraphNodeReferencer) + if !ok { + return nil, nil + } + if _, ok := v.(GraphNodeSubPath); !ok { + return nil, nil + } + + var matches []dag.Vertex + var missing []addrs.Referenceable + + for _, ref := range rn.References() { + subject := ref.Subject + + key := m.referenceMapKey(v, subject) + if _, exists := m.vertices[key]; !exists { + // If what we were looking for was a ResourceInstance then we + // might be in a resource-oriented graph rather than an + // instance-oriented graph, and so we'll see if we have the + // resource itself instead. + switch ri := subject.(type) { + case addrs.ResourceInstance: + subject = ri.ContainingResource() + case addrs.ResourceInstancePhase: + subject = ri.ContainingResource() + } + key = m.referenceMapKey(v, subject) + } + + vertices := m.vertices[key] + for _, rv := range vertices { + // don't include self-references + if rv == v { + continue + } + matches = append(matches, rv) + } + if len(vertices) == 0 { + missing = append(missing, ref.Subject) + } + } + + return matches, missing +} + +// Referrers returns the set of vertices that refer to the given vertex. +func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex { + rn, ok := v.(GraphNodeReferenceable) + if !ok { + return nil + } + sp, ok := v.(GraphNodeSubPath) + if !ok { + return nil + } + + var matches []dag.Vertex + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(sp.Path(), addr) + referrers, ok := m.edges[key] + if !ok { + continue + } + + // If the referrer set includes our own given vertex then we skip, + // since we don't want to return self-references. + selfRef := false + for _, p := range referrers { + if p == v { + selfRef = true + break + } + } + if selfRef { + continue + } + + matches = append(matches, referrers...) + } + + return matches +} + +func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { + return fmt.Sprintf("%s|%s", path.String(), addr.String()) +} + +// vertexReferenceablePath returns the path in which the given vertex can be +// referenced. This is the path that its results from ReferenceableAddrs +// are considered to be relative to. +// +// Only GraphNodeSubPath implementations can be referenced, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { + sp, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex is referenced from a different module than where it was + // declared. + path, _ := outside.ReferenceOutside() + return path + } + + // Vertex is referenced from the same module as where it was declared. + return sp.Path() +} + +// vertexReferencePath returns the path in which references _from_ the given +// vertex must be interpreted. +// +// Only GraphNodeSubPath implementations can have references, so this method +// will panic if the given vertex does not implement that interface. 
+func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { + sp, ok := referrer.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + var path addrs.ModuleInstance + if outside, ok := referrer.(GraphNodeReferenceOutside); ok { + // Vertex makes references to objects in a different module than where + // it was declared. + _, path = outside.ReferenceOutside() + return path + } + + // Vertex makes references to objects in the same module as where it + // was declared. + return sp.Path() +} + +// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex +// that the reference is from, and "addr" is the address of the object being +// referenced. +// +// The result is an opaque string that includes both the address of the given +// object and the address of the module instance that object belongs to. +// +// Only GraphNodeSubPath implementations can be referrers, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { + path := vertexReferencePath(referrer) + return m.mapKey(path, addr) +} + +// NewReferenceMap is used to create a new reference map for the +// given set of vertices. +func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { + var m ReferenceMap + + // Build the lookup table + vertices := make(map[string][]dag.Vertex) + for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + + // We're only looking for referenceable nodes + rn, ok := v.(GraphNodeReferenceable) + if !ok { + continue + } + + path := m.vertexReferenceablePath(v) + + // Go through and cache them + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(path, addr) + vertices[key] = append(vertices[key], v) + } + + // Any node can be referenced by the address of the module it belongs + // to or any of that module's ancestors. + for _, addr := range path.Ancestors()[1:] { + // Can be referenced either as the specific call instance (with + // an instance key) or as the bare module call itself (the "module" + // block in the parent module that created the instance). + callPath, call := addr.Call() + callInstPath, callInst := addr.CallInstance() + callKey := m.mapKey(callPath, call) + callInstKey := m.mapKey(callInstPath, callInst) + vertices[callKey] = append(vertices[callKey], v) + vertices[callInstKey] = append(vertices[callInstKey], v) + } + } + + // Build the lookup table for referenced by + edges := make(map[string][]dag.Vertex) + for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + + rn, ok := v.(GraphNodeReferencer) + if !ok { + // We're only looking for referenceable nodes + continue + } + + // Go through and cache them + for _, ref := range rn.References() { + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) + } + key := m.referenceMapKey(v, ref.Subject) + edges[key] = append(edges[key], v) + } + } + + m.vertices = vertices + m.edges = edges + return &m +} + +// ReferencesFromConfig returns the references that a configuration has +// based on the interpolated variables in a configuration. 
+func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { + if body == nil { + return nil + } + refs, _ := lang.ReferencesInBlock(body, schema) + return refs +} + +// appendResourceDestroyReferences identifies resource and resource instance +// references in the given slice and appends to it the "destroy-phase" +// equivalents of those references, returning the result. +// +// This can be used in the References implementation for a node which must also +// depend on the destruction of anything it references. +func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { + given := refs + for _, ref := range given { + switch tr := ref.Subject.(type) { + case addrs.Resource: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + case addrs.ResourceInstance: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + } + } + return refs +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go new file mode 100644 index 000000000..327950d88 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_removed_modules.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Config *configs.Config // root node in the config tree + State *states.State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state! + if t.State == nil { + return nil + } + + for _, m := range t.State.Modules { + cc := t.Config.DescendentForInstance(m.Addr) + if cc != nil { + continue + } + + log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) + g.Add(&NodeModuleRemoved{Addr: m.Addr}) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go new file mode 100644 index 000000000..51d9466a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_resource_count.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + "github.com/zclconf/go-cty/cty" +) + +// ResourceCountTransformer is a GraphTransformer that expands the count +// out for a specific resource. +// +// This assumes that the count is already interpolated. +type ResourceCountTransformer struct { + Concrete ConcreteResourceInstanceNodeFunc + Schema *configschema.Block + + // Count is either the number of indexed instances to create, or -1 to + // indicate that count is not set at all and thus a no-key instance should + // be created. + Count int + ForEach map[string]cty.Value + Addr addrs.AbsResource +} + +func (t *ResourceCountTransformer) Transform(g *Graph) error { + if t.Count < 0 && t.ForEach == nil { + // Negative count indicates that count is not set at all. 
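		// In this case the transformer creates exactly one instance node,
		// addressed without an index key: for example aws_instance.foo
		// rather than aws_instance.foo[0] or aws_instance.foo["key"].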
+ addr := t.Addr.Instance(addrs.NoKey) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + return nil + } + + // Add nodes related to the for_each expression + for key := range t.ForEach { + addr := t.Addr.Instance(addrs.StringKey(key)) + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + // For each count, build and add the node + for i := 0; i < t.Count; i++ { + key := addrs.IntKey(i) + addr := t.Addr.Instance(key) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go new file mode 100644 index 000000000..485c1c8a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_root.go @@ -0,0 +1,38 @@ +package terraform + +import "github.com/hashicorp/terraform-plugin-sdk/internal/dag" + +const rootNodeName = "root" + +// RootTransformer is a GraphTransformer that adds a root to the graph. +type RootTransformer struct{} + +func (t *RootTransformer) Transform(g *Graph) error { + // If we already have a good root, we're done + if _, err := g.Root(); err == nil { + return nil + } + + // Add a root + var root graphNodeRoot + g.Add(root) + + // Connect the root to all the edges that need it + for _, v := range g.Vertices() { + if v == root { + continue + } + + if g.UpEdges(v).Len() == 0 { + g.Connect(dag.BasicEdge(root, v)) + } + } + + return nil +} + +type graphNodeRoot struct{} + +func (n graphNodeRoot) Name() string { + return rootNodeName +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go new file mode 100644 index 000000000..e7d95be97 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_state.go @@ -0,0 +1,74 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/states" +) + +// StateTransformer is a GraphTransformer that adds the elements of +// the state to the graph. +// +// This transform is used for example by the DestroyPlanGraphBuilder to ensure +// that only resources that are in the state are represented in the graph. +type StateTransformer struct { + // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract + // resource instance nodes that this transformer will create. + // + // If either of these is nil, the objects of that type will be skipped and + // not added to the graph at all. It doesn't make sense to use this + // transformer without setting at least one of these, since that would + // skip everything and thus be a no-op. 
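// A hedged sketch (not a real graph builder from the SDK): constructing a
// StateTransformer that turns each current resource instance object in state
// into a graph node. The concrete function here just returns the abstract
// node unchanged, whereas a real builder would wrap it in a concrete node
// type; the dag and states imports are assumed to be available.
func exampleStateTransformer(state *states.State) GraphTransformer {
	return &StateTransformer{
		ConcreteCurrent: func(a *NodeAbstractResourceInstance) dag.Vertex {
			return a
		},
		State: state,
	}
}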
+ ConcreteCurrent ConcreteResourceInstanceNodeFunc + ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc + + State *states.State +} + +func (t *StateTransformer) Transform(g *Graph) error { + if !t.State.HasResources() { + log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") + return nil + } + + switch { + case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") + case t.ConcreteCurrent != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") + case t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") + default: + log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") + } + + for _, ms := range t.State.Modules { + moduleAddr := ms.Addr + + for _, rs := range ms.Resources { + resourceAddr := rs.Addr.Absolute(moduleAddr) + + for key, is := range rs.Instances { + addr := resourceAddr.Instance(key) + + if obj := is.Current; obj != nil && t.ConcreteCurrent != nil { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteCurrent(abstract) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr) + } + + if t.ConcreteDeposed != nil { + for dk := range is.Deposed { + abstract := NewNodeAbstractResourceInstance(addr) + node := t.ConcreteDeposed(abstract, dk) + g.Add(node) + log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk) + } + } + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go new file mode 100644 index 000000000..beb1eed9e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_targets.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// GraphNodeTargetable is an interface for graph nodes to implement when they +// need to be told about incoming targets. This is useful for nodes that need +// to respect targets as they dynamically expand. Note that the list of targets +// provided will contain every target provided, and each implementing graph +// node must filter this list to targets considered relevant. +type GraphNodeTargetable interface { + SetTargets([]addrs.Targetable) +} + +// GraphNodeTargetDownstream is an interface for graph nodes that need to +// be remain present under targeting if any of their dependencies are targeted. +// TargetDownstream is called with the set of vertices that are direct +// dependencies for the node, and it should return true if the node must remain +// in the graph in support of those dependencies. +// +// This is used in situations where the dependency edges are representing an +// ordering relationship but the dependency must still be visited if its +// dependencies are visited. This is true for outputs, for example, since +// they must get updated if any of their dependent resources get updated, +// which would not normally be true if one of their dependencies were targeted. 
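// Hypothetical example (not an SDK node type): a vertex that remains in a
// targeted graph only while none of the dependencies it is shown have been
// filtered out, which is roughly the behavior output nodes need.
type exampleDownstreamNode struct{}

func (n *exampleDownstreamNode) TargetDownstream(targeted, untargeted *dag.Set) bool {
	// Keep this node as long as every dependency it was given is targeted.
	return untargeted == nil || untargeted.Len() == 0
}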
+type GraphNodeTargetDownstream interface { + TargetDownstream(targeted, untargeted *dag.Set) bool +} + +// TargetsTransformer is a GraphTransformer that, when the user specifies a +// list of resources to target, limits the graph to only those resources and +// their dependencies. +type TargetsTransformer struct { + // List of targeted resource names specified by the user + Targets []addrs.Targetable + + // If set, the index portions of resource addresses will be ignored + // for comparison. This is used when transforming a graph where + // counted resources have not yet been expanded, since otherwise + // the unexpanded nodes (which never have indices) would not match. + IgnoreIndices bool + + // Set to true when we're in a `terraform destroy` or a + // `terraform plan -destroy` + Destroy bool +} + +func (t *TargetsTransformer) Transform(g *Graph) error { + if len(t.Targets) > 0 { + targetedNodes, err := t.selectTargetedNodes(g, t.Targets) + if err != nil { + return err + } + + for _, v := range g.Vertices() { + removable := false + if _, ok := v.(GraphNodeResource); ok { + removable = true + } + + if vr, ok := v.(RemovableIfNotTargeted); ok { + removable = vr.RemoveIfNotTargeted() + } + + if removable && !targetedNodes.Include(v) { + log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) + g.Remove(v) + } + } + } + + return nil +} + +// Returns a set of targeted nodes. A targeted node is either addressed +// directly, address indirectly via its container, or it's a dependency of a +// targeted node. Destroy mode keeps dependents instead of dependencies. +func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) { + targetedNodes := new(dag.Set) + + vertices := g.Vertices() + + for _, v := range vertices { + if t.nodeIsTarget(v, addrs) { + targetedNodes.Add(v) + + // We inform nodes that ask about the list of targets - helps for nodes + // that need to dynamically expand. Note that this only occurs for nodes + // that are already directly targeted. + if tn, ok := v.(GraphNodeTargetable); ok { + tn.SetTargets(addrs) + } + + var deps *dag.Set + var err error + if t.Destroy { + deps, err = g.Descendents(v) + } else { + deps, err = g.Ancestors(v) + } + if err != nil { + return nil, err + } + + for _, d := range deps.List() { + targetedNodes.Add(d) + } + } + } + return t.addDependencies(targetedNodes, g) +} + +func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) { + // Handle nodes that need to be included if their dependencies are included. + // This requires multiple passes since we need to catch transitive + // dependencies if and only if they are via other nodes that also + // support TargetDownstream. For example: + // output -> output -> targeted-resource: both outputs need to be targeted + // output -> non-targeted-resource -> targeted-resource: output not targeted + // + // We'll keep looping until we stop targeting more nodes. + queue := targetedNodes.List() + for len(queue) > 0 { + vertices := queue + queue = nil // ready to append for next iteration if neccessary + for _, v := range vertices { + // providers don't cause transitive dependencies, so don't target + // downstream from them. + if _, ok := v.(GraphNodeProvider); ok { + continue + } + + dependers := g.UpEdges(v) + if dependers == nil { + // indicates that there are no up edges for this node, so + // we have nothing to do here. 
+ continue + } + + dependers = dependers.Filter(func(dv interface{}) bool { + _, ok := dv.(GraphNodeTargetDownstream) + return ok + }) + + if dependers.Len() == 0 { + continue + } + + for _, dv := range dependers.List() { + if targetedNodes.Include(dv) { + // Already present, so nothing to do + continue + } + + // We'll give the node some information about what it's + // depending on in case that informs its decision about whether + // it is safe to be targeted. + deps := g.DownEdges(v) + + depsTargeted := deps.Intersection(targetedNodes) + depsUntargeted := deps.Difference(depsTargeted) + + if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { + targetedNodes.Add(dv) + // Need to visit this node on the next pass to see if it + // has any transitive dependers. + queue = append(queue, dv) + } + } + } + } + + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable. +// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? + if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + + if !targetedNodes.Include(d) { + // this one is going to be removed, so it doesn't count + continue + } + + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) + + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true +} + +func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { + var vertexAddr addrs.Targetable + switch r := v.(type) { + case GraphNodeResourceInstance: + vertexAddr = r.ResourceInstanceAddr() + case GraphNodeResource: + vertexAddr = r.ResourceAddr() + default: + // Only resource and resource instance nodes can be targeted. + return false + } + _, ok := v.(GraphNodeResource) + if !ok { + return false + } + + for _, targetAddr := range targets { + if t.IgnoreIndices { + // If we're ignoring indices then we'll convert any resource instance + // addresses into resource addresses. We don't need to convert + // vertexAddr because instance addresses are contained within + // their associated resources, and so .TargetContains will take + // care of this for us. + if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { + targetAddr = instance.ContainingResource() + } + } + if targetAddr.TargetContains(vertexAddr) { + return true + } + } + + return false +} + +// RemovableIfNotTargeted is a special interface for graph nodes that +// aren't directly addressable, but need to be removed from the graph when they +// are not targeted. (Nodes that are not directly targeted end up in the set of +// targeted nodes because something that _is_ targeted depends on them.) 
The +// initial use case for this interface is GraphNodeConfigVariable, which was +// having trouble interpolating for module variables in targeted scenarios that +// filtered out the resource node being referenced. +type RemovableIfNotTargeted interface { + RemoveIfNotTargeted() bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go new file mode 100644 index 000000000..21842789c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_transitive_reduction.go @@ -0,0 +1,20 @@ +package terraform + +// TransitiveReductionTransformer is a GraphTransformer that performs +// finds the transitive reduction of the graph. For a definition of +// transitive reduction, see Wikipedia. +type TransitiveReductionTransformer struct{} + +func (t *TransitiveReductionTransformer) Transform(g *Graph) error { + // If the graph isn't valid, skip the transitive reduction. + // We don't error here because Terraform itself handles graph + // validation in a better way, or we assume it does. + if err := g.Validate(); err != nil { + return nil + } + + // Do it + g.TransitiveReduction() + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go new file mode 100644 index 000000000..3afce5660 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_variable.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" +) + +// RootVariableTransformer is a GraphTransformer that adds all the root +// variables to the graph. +// +// Root variables are currently no-ops but they must be added to the +// graph since downstream things that depend on them must be able to +// reach them. +type RootVariableTransformer struct { + Config *configs.Config +} + +func (t *RootVariableTransformer) Transform(g *Graph) error { + // We can have no variables if we have no config. + if t.Config == nil { + return nil + } + + // We're only considering root module variables here, since child + // module variables are handled by ModuleVariableTransformer. + vars := t.Config.Module.Variables + + // Add all variables here + for _, v := range vars { + node := &NodeRootVariable{ + Addr: addrs.InputVariable{ + Name: v.Name, + }, + Config: v, + } + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go new file mode 100644 index 000000000..6b3c62d1f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/transform_vertex.go @@ -0,0 +1,44 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/internal/dag" +) + +// VertexTransformer is a GraphTransformer that transforms vertices +// using the GraphVertexTransformers. The Transforms are run in sequential +// order. If a transform replaces a vertex then the next transform will see +// the new vertex. 
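// Hedged sketch, assuming GraphVertexTransformer is the single-vertex
// counterpart of GraphTransformer with signature
// Transform(dag.Vertex) (dag.Vertex, error): a pass-through transform that
// leaves every vertex unchanged, which VertexTransformer below would apply
// to each vertex in turn.
type exampleVertexTransform struct{}

func (exampleVertexTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
	// A real implementation might return a replacement node here, which
	// VertexTransformer would then swap into the graph via g.Replace.
	return v, nil
}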
+type VertexTransformer struct { + Transforms []GraphVertexTransformer +} + +func (t *VertexTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + for _, vt := range t.Transforms { + newV, err := vt.Transform(v) + if err != nil { + return err + } + + // If the vertex didn't change, then don't do anything more + if newV == v { + continue + } + + // Vertex changed, replace it within the graph + if ok := g.Replace(v, newV); !ok { + // This should never happen, big problem + return fmt.Errorf( + "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v", + dag.VertexName(v), dag.VertexName(newV), v, newV) + } + + // Replace v so that future transforms use the proper vertex + v = newV + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go new file mode 100644 index 000000000..f6790d9e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input.go @@ -0,0 +1,28 @@ +package terraform + +import "context" + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. +type UIInput interface { + Input(context.Context, *InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. + Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go new file mode 100644 index 000000000..e2d9c3848 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_mock.go @@ -0,0 +1,25 @@ +package terraform + +import "context" + +// MockUIInput is an implementation of UIInput that can be used for tests. +type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go new file mode 100644 index 000000000..b5d32b1e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_input_prefix.go @@ -0,0 +1,20 @@ +package terraform + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. 
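// A minimal usage sketch: wrapping a MockUIInput (defined above) with the
// PrefixUIInput declared just below so that question IDs are namespaced.
// The prefix and ID values are hypothetical.
func examplePrefixedInput(ctx context.Context) (string, error) {
	mock := &MockUIInput{
		InputReturnMap: map[string]string{
			"provider.aws.region": "us-east-1",
		},
	}
	in := &PrefixUIInput{
		IdPrefix:    "provider.aws",
		QueryPrefix: "AWS: ",
		UIInput:     mock,
	}
	// The wrapped mock sees Id "provider.aws.region" and Query "AWS: Region",
	// so this returns "us-east-1".
	return in.Input(ctx, &InputOpts{Id: "region", Query: "Region"})
}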
+type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go new file mode 100644 index 000000000..84427c63d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output.go @@ -0,0 +1,7 @@ +package terraform + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go new file mode 100644 index 000000000..135a91c5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_callback.go @@ -0,0 +1,9 @@ +package terraform + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go new file mode 100644 index 000000000..d828c921c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_mock.go @@ -0,0 +1,21 @@ +package terraform + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. +type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go new file mode 100644 index 000000000..0d7d4ce03 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/ui_output_provisioner.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/internal/addrs" +) + +// ProvisionerUIOutput is an implementation of UIOutput that calls a hook +// for the output so that the hooks can handle it. 
+type ProvisionerUIOutput struct { + InstanceAddr addrs.AbsResourceInstance + ProvisionerType string + Hooks []Hook +} + +func (o *ProvisionerUIOutput) Output(msg string) { + for _, h := range o.Hooks { + h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go new file mode 100644 index 000000000..5428cd5a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/util.go @@ -0,0 +1,75 @@ +package terraform + +import ( + "sort" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n == 0 { + panic("semaphore with limit 0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. +// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// strSliceContains checks if a given string is contained in a slice +// When anybody asks why Go needs generics, here you go. +func strSliceContains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + return false +} + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go new file mode 100644 index 000000000..627593d76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/valuesourcetype_string.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ValueFromUnknown-0] + _ = x[ValueFromConfig-67] + _ = x[ValueFromAutoFile-70] + _ = x[ValueFromNamedFile-78] + _ = x[ValueFromCLIArg-65] + _ = x[ValueFromEnvVar-69] + _ = x[ValueFromInput-73] + _ = x[ValueFromPlan-80] + _ = x[ValueFromCaller-83] +} + +const ( + _ValueSourceType_name_0 = "ValueFromUnknown" + _ValueSourceType_name_1 = "ValueFromCLIArg" + _ValueSourceType_name_2 = "ValueFromConfig" + _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" + _ValueSourceType_name_4 = "ValueFromInput" + _ValueSourceType_name_5 = "ValueFromNamedFile" + _ValueSourceType_name_6 = "ValueFromPlan" + _ValueSourceType_name_7 = "ValueFromCaller" +) + +var ( + _ValueSourceType_index_3 = [...]uint8{0, 15, 32} +) + +func (i ValueSourceType) String() string { + switch { + case i == 0: + return _ValueSourceType_name_0 + case i == 65: + return _ValueSourceType_name_1 + case i == 67: + return _ValueSourceType_name_2 + case 69 <= i && i <= 70: + i -= 69 + return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] + case i == 73: + return _ValueSourceType_name_4 + case i == 78: + return _ValueSourceType_name_5 + case i == 80: + return _ValueSourceType_name_6 + case i == 83: + return _ValueSourceType_name_7 + default: + return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go new file mode 100644 index 000000000..4ae9c92cf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/variables.go @@ -0,0 +1,313 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" +) + +// InputValue represents a value for a variable in the root module, provided +// as part of the definition of an operation. +type InputValue struct { + Value cty.Value + SourceType ValueSourceType + + // SourceRange provides source location information for values whose + // SourceType is either ValueFromConfig or ValueFromFile. It is not + // populated for other source types, and so should not be used. + SourceRange tfdiags.SourceRange +} + +// ValueSourceType describes what broad category of source location provided +// a particular value. +type ValueSourceType rune + +const ( + // ValueFromUnknown is the zero value of ValueSourceType and is not valid. + ValueFromUnknown ValueSourceType = 0 + + // ValueFromConfig indicates that a value came from a .tf or .tf.json file, + // e.g. the default value defined for a variable. + ValueFromConfig ValueSourceType = 'C' + + // ValueFromAutoFile indicates that a value came from a "values file", like + // a .tfvars file, that was implicitly loaded by naming convention. + ValueFromAutoFile ValueSourceType = 'F' + + // ValueFromNamedFile indicates that a value came from a named "values file", + // like a .tfvars file, that was passed explicitly on the command line (e.g. + // -var-file=foo.tfvars). + ValueFromNamedFile ValueSourceType = 'N' + + // ValueFromCLIArg indicates that the value was provided directly in + // a CLI argument. The name of this argument is not recorded and so it must + // be inferred from context. 
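	// For example, a value supplied as -var="region=us-west-2" on the
	// command line falls into this category.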
+ ValueFromCLIArg ValueSourceType = 'A' + + // ValueFromEnvVar indicates that the value was provided via an environment + // variable. The name of the variable is not recorded and so it must be + // inferred from context. + ValueFromEnvVar ValueSourceType = 'E' + + // ValueFromInput indicates that the value was provided at an interactive + // input prompt. + ValueFromInput ValueSourceType = 'I' + + // ValueFromPlan indicates that the value was retrieved from a stored plan. + ValueFromPlan ValueSourceType = 'P' + + // ValueFromCaller indicates that the value was explicitly overridden by + // a caller to Context.SetVariable after the context was constructed. + ValueFromCaller ValueSourceType = 'S' +) + +func (v *InputValue) GoString() string { + if (v.SourceRange != tfdiags.SourceRange{}) { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) + } else { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) + } +} + +func (v ValueSourceType) GoString() string { + return fmt.Sprintf("terraform.%s", v) +} + +//go:generate go run golang.org/x/tools/cmd/stringer -type ValueSourceType + +// InputValues is a map of InputValue instances. +type InputValues map[string]*InputValue + +// InputValuesFromCaller turns the given map of naked values into an +// InputValues that attributes each value to "a caller", using the source +// type ValueFromCaller. This is primarily useful for testing purposes. +// +// This should not be used as a general way to convert map[string]cty.Value +// into InputValues, since in most real cases we want to set a suitable +// other SourceType and possibly SourceRange value. +func InputValuesFromCaller(vals map[string]cty.Value) InputValues { + ret := make(InputValues, len(vals)) + for k, v := range vals { + ret[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, + } + } + return ret +} + +// Override merges the given value maps with the receiver, overriding any +// conflicting keys so that the latest definition wins. +func (vv InputValues) Override(others ...InputValues) InputValues { + // FIXME: This should check to see if any of the values are maps and + // merge them if so, in order to preserve the behavior from prior to + // Terraform 0.12. + ret := make(InputValues) + for k, v := range vv { + ret[k] = v + } + for _, other := range others { + for k, v := range other { + ret[k] = v + } + } + return ret +} + +// JustValues returns a map that just includes the values, discarding the +// source information. +func (vv InputValues) JustValues() map[string]cty.Value { + ret := make(map[string]cty.Value, len(vv)) + for k, v := range vv { + ret[k] = v.Value + } + return ret +} + +// DefaultVariableValues returns an InputValues map representing the default +// values specified for variables in the given configuration map. +func DefaultVariableValues(configs map[string]*configs.Variable) InputValues { + ret := make(InputValues) + for k, c := range configs { + if c.Default == cty.NilVal { + continue + } + ret[k] = &InputValue{ + Value: c.Default, + SourceType: ValueFromConfig, + SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange), + } + } + return ret +} + +// SameValues returns true if the given InputValues has the same values as +// the receiever, disregarding the source types and source ranges. 
+// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +func (vv InputValues) SameValues(other InputValues) bool { + if len(vv) != len(other) { + return false + } + + for k, v := range vv { + ov, exists := other[k] + if !exists { + return false + } + if !v.Value.RawEquals(ov.Value) { + return false + } + } + + return true +} + +// HasValues returns true if the reciever has the same values as in the given +// map, disregarding the source types and source ranges. +// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +func (vv InputValues) HasValues(vals map[string]cty.Value) bool { + if len(vv) != len(vals) { + return false + } + + for k, v := range vv { + oVal, exists := vals[k] + if !exists { + return false + } + if !v.Value.RawEquals(oVal) { + return false + } + } + + return true +} + +// Identical returns true if the given InputValues has the same values, +// source types, and source ranges as the receiver. +// +// Values are compared using the cty "RawEquals" method, which means that +// unknown values can be considered equal to one another if they are of the +// same type. +// +// This method is primarily for testing. For most practical purposes, it's +// better to use SameValues or HasValues. +func (vv InputValues) Identical(other InputValues) bool { + if len(vv) != len(other) { + return false + } + + for k, v := range vv { + ov, exists := other[k] + if !exists { + return false + } + if !v.Value.RawEquals(ov.Value) { + return false + } + if v.SourceType != ov.SourceType { + return false + } + if v.SourceRange != ov.SourceRange { + return false + } + } + + return true +} + +// checkInputVariables ensures that variable values supplied at the UI conform +// to their corresponding declarations in configuration. +// +// The set of values is considered valid only if the returned diagnostics +// does not contain errors. A valid set of values may still produce warnings, +// which should be returned to the user. +func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for name, vc := range vcs { + val, isSet := vs[name] + if !isSet { + // Always an error, since the caller should already have included + // default values from the configuration in the values map. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unassigned variable", + fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name), + )) + continue + } + + wantType := vc.Type + + // A given value is valid if it can convert to the desired type. + _, err := convert.Convert(val.Value, wantType) + if err != nil { + switch val.SourceType { + case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: + // We have source location information for these. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for input variable", + Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), + Subject: val.SourceRange.ToHCL().Ptr(), + }) + case ValueFromEnvVar: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromInput: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), + )) + default: + // The above gets us good coverage for the situations users + // are likely to encounter with their own inputs. The other + // cases are generally implementation bugs, so we'll just + // use a generic error for these. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), + )) + } + } + } + + // Check for any variables that are assigned without being configured. + // This is always an implementation error in the caller, because we + // expect undefined variables to be caught during context construction + // where there is better context to report it well. + for name := range vs { + if _, defined := vcs[name]; !defined { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value assigned to undeclared variable", + fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), + )) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go new file mode 100644 index 000000000..4cc3bbba6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/version_required.go @@ -0,0 +1,62 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform-plugin-sdk/internal/tfdiags" + + "github.com/hashicorp/terraform-plugin-sdk/internal/configs" + + tfversion "github.com/hashicorp/terraform-plugin-sdk/internal/version" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. +func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { + return nil + } + + var diags tfdiags.Diagnostics + module := config.Module + + for _, constraint := range module.CoreVersionConstraints { + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(config.Path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "This configuration does not support Terraform version %s. 
To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: &constraint.DeclRange, + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: &constraint.DeclRange, + }) + } + } + } + + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go new file mode 100644 index 000000000..0666aa5f3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/terraform/walkoperation_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[walkInvalid-0] + _ = x[walkApply-1] + _ = x[walkPlan-2] + _ = x[walkPlanDestroy-3] + _ = x[walkRefresh-4] + _ = x[walkValidate-5] + _ = x[walkDestroy-6] + _ = x[walkImport-7] + _ = x[walkEval-8] +} + +const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" + +var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} + +func (i walkOperation) String() string { + if i >= walkOperation(len(_walkOperation_index)-1) { + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go new file mode 100644 index 000000000..0dae567db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go @@ -0,0 +1,61 @@ +package auth + +import ( + "github.com/hashicorp/terraform-svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. +func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. 
+// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} + +func (s *cachingCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { + // We'll delete the cache entry even if the store fails, since that just + // means that the next read will go to the real store and get a chance to + // see which object (old or new) is actually present. + delete(s.cache, host) + return s.source.StoreForHost(host, credentials) +} + +func (s *cachingCredentialsSource) ForgetForHost(host svchost.Hostname) error { + // We'll delete the cache entry even if the store fails, since that just + // means that the next read will go to the real store and get a chance to + // see if the object is still present. + delete(s.cache, host) + return s.source.ForgetForHost(host) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go new file mode 100644 index 000000000..36441cd11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go @@ -0,0 +1,118 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "fmt" + "net/http" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. +// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +// +// The write operations on a Credentials are tried only on the first object, +// under the assumption that it is the primary store. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) + + // StoreForHost takes a HostCredentialsWritable and saves it as the + // credentials for the given host. + // + // If credentials are already stored for the given host, it will try to + // replace those credentials but may produce an error if such replacement + // is not possible. 
+ StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error + + // ForgetForHost discards any stored credentials for the given host. It + // does nothing and returns successfully if no credentials are saved + // for that host. + ForgetForHost(host svchost.Hostname) error +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) + + // Token returns the authentication token. + Token() string +} + +// HostCredentialsWritable is an extension of HostCredentials for credentials +// objects that can be serialized as a JSON-compatible object value for +// storage. +type HostCredentialsWritable interface { + HostCredentials + + // ToStore returns a cty.Value, always of an object type, + // representing data that can be serialized to represent this object + // in persistent storage. + // + // The resulting value may uses only cty values that can be accepted + // by the cty JSON encoder, though the caller may elect to instead store + // it in some other format that has a JSON-compatible type system. + ToStore() cty.Value +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. +func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} + +// StoreForHost passes the given arguments to the same operation on the +// first CredentialsSource in the receiver. +func (c Credentials) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { + if len(c) == 0 { + return fmt.Errorf("no credentials store is available") + } + + return c[0].StoreForHost(host, credentials) +} + +// ForgetForHost passes the given arguments to the same operation on the +// first CredentialsSource in the receiver. +func (c Credentials) ForgetForHost(host svchost.Hostname) error { + if len(c) == 0 { + return fmt.Errorf("no credentials store is available") + } + + return c[0].ForgetForHost(host) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go new file mode 100644 index 000000000..7198c6744 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/from_map.go @@ -0,0 +1,48 @@ +package auth + +import ( + "github.com/zclconf/go-cty/cty" +) + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. 
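// Hedged illustration of how a decoded credentials map is typically used:
// the token value and request URL below are placeholders, and net/http is
// assumed to be imported alongside cty.
func exampleBearerRequest() (*http.Request, error) {
	creds := HostCredentialsFromMap(map[string]interface{}{
		"token": "abc123",
	})
	req, err := http.NewRequest("GET", "https://example.com/api", nil)
	if err != nil {
		return nil, err
	}
	if creds != nil {
		// For token credentials this sets "Authorization: Bearer abc123".
		creds.PrepareRequest(req)
	}
	return req, nil
}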
+func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} + +// HostCredentialsFromObject converts a cty.Value of an object type into a +// HostCredentials object if possible, or returns nil if no credentials could +// be extracted from the map. +// +// This function ignores object attributes it is unfamiliar with, to allow for +// future expansion of the credentials object structure for new credential types. +// +// If the given value is not of an object type, this function will panic. +func HostCredentialsFromObject(obj cty.Value) HostCredentials { + if !obj.Type().HasAttribute("token") { + return nil + } + + tokenV := obj.GetAttr("token") + if tokenV.IsNull() || !tokenV.IsKnown() { + return nil + } + if !cty.String.Equals(tokenV.Type()) { + // Weird, but maybe some future Terraform version accepts an object + // here for some reason, so we'll be resilient. + return nil + } + + return HostCredentialsToken(tokenV.AsString()) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go new file mode 100644 index 000000000..76505f209 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go @@ -0,0 +1,149 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. +// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). 
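// Hedged sketch: constructing a helper-backed credentials source. The helper
// path is hypothetical; when credentials are requested the helper is run as
//   /usr/local/bin/terraform-credentials-example get app.terraform.io
// and is expected to print a JSON object such as {"token":"abc123"} on
// stdout, which ForHost below decodes via HostCredentialsFromMap.
func exampleHelperCredentials() (HostCredentials, error) {
	src := HelperProgramCredentialsSource("/usr/local/bin/terraform-credentials-example")
	host, err := svchost.ForComparison("app.terraform.io")
	if err != nil {
		return nil, err
	}
	return src.ForHost(host)
}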
+func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} + +func (s *helperProgramCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "store") + args = append(args, string(host)) + + toStore := credentials.ToStore() + toStoreRaw, err := ctyjson.Marshal(toStore, toStore.Type()) + if err != nil { + return fmt.Errorf("can't serialize credentials to store: %s", err) + } + + inReader := bytes.NewReader(toStoreRaw) + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: inReader, + Stderr: &errBuf, + Stdout: nil, + } + err = cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + return nil +} + +func (s *helperProgramCredentialsSource) ForgetForHost(host svchost.Hostname) error { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "forget") + args = append(args, string(host)) + + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stderr: &errBuf, + Stdout: nil, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go new file mode 100644 index 000000000..f8b0b076e --- 
/dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go @@ -0,0 +1,38 @@ +package auth + +import ( + "fmt" + + "github.com/hashicorp/terraform-svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. +func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} + +func (s staticCredentialsSource) StoreForHost(host svchost.Hostname, credentials HostCredentialsWritable) error { + return fmt.Errorf("can't store new credentials in a static credentials source") +} + +func (s staticCredentialsSource) ForgetForHost(host svchost.Hostname) error { + return fmt.Errorf("can't discard credentials from a static credentials source") +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go new file mode 100644 index 000000000..1d36553ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/token_credentials.go @@ -0,0 +1,43 @@ +package auth + +import ( + "net/http" + + "github.com/zclconf/go-cty/cty" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer". +// +// To save a token as the credentials for a host, convert the token string to +// this type and use the result as a HostCredentialsWritable implementation. +type HostCredentialsToken string + +// Interface implementation assertions. Compilation will fail here if +// HostCredentialsToken does not fully implement these interfaces. +var _ HostCredentials = HostCredentialsToken("") +var _ HostCredentialsWritable = HostCredentialsToken("") + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. +func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} + +// Token returns the authentication token. +func (tc HostCredentialsToken) Token() string { + return string(tc) +} + +// ToStore returns a credentials object with a single attribute "token" whose +// value is the token string. +func (tc HostCredentialsToken) ToStore() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "token": cty.StringVal(string(tc)), + }) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go new file mode 100644 index 000000000..978313633 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go @@ -0,0 +1,275 @@ +// Package disco handles Terraform's remote service discovery protocol. 
+// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost/auth" +) + +const ( + // Fixed path to the discovery manifest. + discoPath = "/.well-known/terraform.json" + + // Arbitrary-but-small number to prevent runaway redirect loops. + maxRedirects = 3 + + // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. + discoTimeout = 11 * time.Second + + // 1MB - to prevent abusive services from using loads of our memory. + maxDiscoDocBytes = 1 * 1024 * 1024 +) + +// httpTransport is overridden during tests, to skip TLS verification. +var httpTransport = defaultHttpTransport() + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. +type Disco struct { + hostCache map[svchost.Hostname]*Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.RoundTripper to use. + Transport http.RoundTripper +} + +// New returns a new initialized discovery object. +func New() *Disco { + return NewWithCredentialsSource(nil) +} + +// NewWithCredentialsSource returns a new discovery object initialized with +// the given credentials source. +func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { + return &Disco{ + hostCache: make(map[svchost.Hostname]*Host), + credsSrc: credsSrc, + Transport: httpTransport, + } +} + +func (d *Disco) SetUserAgent(uaString string) { + d.Transport = &userAgentRoundTripper{ + innerRt: d.Transport, + userAgent: uaString, + } +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// CredentialsSource returns the credentials source associated with the receiver, +// or an empty credentials source if none is associated. +func (d *Disco) CredentialsSource() auth.CredentialsSource { + if d.credsSrc == nil { + // We'll return an empty one just to save the caller from having to + // protect against the nil case, since this interface already allows + // for the possibility of there being no credentials at all. + return auth.StaticCredentialsSource(nil) + } + return d.credsSrc +} + +// CredentialsForHost returns a non-nil HostCredentials if the embedded source has +// credentials available for the host, and a nil HostCredentials if it does not. +func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { + if d.credsSrc == nil { + return nil, nil + } + return d.credsSrc.ForHost(hostname) +} + +// ForceHostServices provides a pre-defined set of services for a given +// host, which prevents the receiver from attempting network-based discovery +// for the given host. Instead, the given services map will be returned +// verbatim. 
+// +// When providing "forced" services, any relative URLs are resolved against +// the initial discovery URL that would have been used for network-based +// discovery, yielding the same results as if the given map were published +// at the host's default discovery URL, though using absolute URLs is strongly +// recommended to make the configured behavior more explicit. +func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { + if services == nil { + services = map[string]interface{}{} + } + + d.hostCache[hostname] = &Host{ + discoURL: &url.URL{ + Scheme: "https", + Host: string(hostname), + Path: discoPath, + }, + hostname: hostname.ForDisplay(), + services: services, + transport: d.Transport, + } +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say "host does not provide a service", +// regardless of whether that is due to that service specifically being absent +// or due to the host not providing Terraform services at all, since we don't +// wish to expose the detail of whole-host discovery to an end-user. +func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { + if host, cached := d.hostCache[hostname]; cached { + return host, nil + } + + host, err := d.discover(hostname) + if err != nil { + return nil, err + } + d.hostCache[hostname] = host + + return host, nil +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. +func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { + host, err := d.Discover(hostname) + if err != nil { + return nil, err + } + return host.ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. +func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { + discoURL := &url.URL{ + Scheme: "https", + Host: hostname.String(), + Path: discoPath, + } + + client := &http.Client{ + Transport: d.Transport, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // this error will never actually be seen + } + return nil + }, + } + + req := &http.Request{ + Header: make(http.Header), + Method: "GET", + URL: discoURL, + } + req.Header.Set("Accept", "application/json") + + creds, err := d.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) + } + if creds != nil { + // Update the request to include credentials. + creds.PrepareRequest(req) + } + + log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request discovery document: %v", err) + } + defer resp.Body.Close() + + host := &Host{ + // Use the discovery URL from resp.Request in + // case the client followed any redirects. 
+ discoURL: resp.Request.URL, + hostname: hostname.ForDisplay(), + transport: d.Transport, + } + + // Return the host without any services. + if resp.StatusCode == 404 { + return host, nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) + } + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) + } + if mediaType != "application/json" { + return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) + } + + // This doesn't catch chunked encoding, because ContentLength is -1 in that case. + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + return nil, fmt.Errorf( + "Discovery doc response is too large (got %d bytes; limit %d)", + resp.ContentLength, maxDiscoDocBytes, + ) + } + + // If the response is using chunked encoding then we can't predict its + // size, but we'll at least prevent reading the entire thing into memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + return nil, fmt.Errorf("Error reading discovery document body: %v", err) + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) + } + host.services = services + + return host, nil +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. +func (d *Disco) Forget(hostname svchost.Hostname) { + delete(d.hostCache, hostname) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. +func (d *Disco) ForgetAll() { + d.hostCache = make(map[svchost.Hostname]*Host) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/host.go b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go new file mode 100644 index 000000000..2d0fc9f12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/disco/host.go @@ -0,0 +1,412 @@ +package disco + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-version" +) + +const versionServiceID = "versions.v1" + +// Host represents a service discovered host. +type Host struct { + discoURL *url.URL + hostname string + services map[string]interface{} + transport http.RoundTripper +} + +// Constraints represents the version constraints of a service. +type Constraints struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// ErrServiceNotProvided is returned when the service is not provided. +type ErrServiceNotProvided struct { + hostname string + service string +} + +// Error returns a customized error message. +func (e *ErrServiceNotProvided) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not provide a %s service", e.service) + } + return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service) +} + +// ErrVersionNotSupported is returned when the version is not supported. 
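As a reading aid for the error types in this file (ErrServiceNotProvided above, ErrVersionNotSupported declared next), here is a small, hypothetical sketch of how a caller might tell them apart after a `ServiceURL` lookup. The hostname, service ID, and URL are made up, and `ForceHostServices` is used only so the sketch needs no network access.

```go
package main

import (
	"fmt"
	"log"

	svchost "github.com/hashicorp/terraform-svchost"
	"github.com/hashicorp/terraform-svchost/disco"
)

func main() {
	d := disco.New()

	hostname, err := svchost.ForComparison("example.com")
	if err != nil {
		log.Fatal(err)
	}

	// Pre-seed the discovery cache so no HTTP request is made; the
	// service ID and URL are purely illustrative.
	d.ForceHostServices(hostname, map[string]interface{}{
		"modules.v1": "https://example.com/api/registry/v1/modules/",
	})

	host, err := d.Discover(hostname)
	if err != nil {
		log.Fatal(err)
	}

	// Ask for a version the host does not advertise.
	if _, err := host.ServiceURL("modules.v2"); err != nil {
		switch err.(type) {
		case *disco.ErrServiceNotProvided:
			fmt.Println("service not provided at all:", err)
		case *disco.ErrVersionNotSupported:
			// This branch is hit here: modules.v1 exists, modules.v2 does not.
			fmt.Println("service exists, version unsupported:", err)
		default:
			fmt.Println("other discovery error:", err)
		}
	}
}
```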
+type ErrVersionNotSupported struct { + hostname string + service string + version string +} + +// Error returns a customized error message. +func (e *ErrVersionNotSupported) Error() string { + if e.hostname == "" { + return fmt.Sprintf("host does not support %s version %s", e.service, e.version) + } + return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version) +} + +// ErrNoVersionConstraints is returned when checkpoint was disabled +// or the endpoint to query for version constraints was unavailable. +type ErrNoVersionConstraints struct { + disabled bool +} + +// Error returns a customized error message. +func (e *ErrNoVersionConstraints) Error() string { + if e.disabled { + return "checkpoint disabled" + } + return "unable to contact versions service" +} + +// ServiceURL returns the URL associated with the given service identifier, +// which should be of the form "servicename.vN". +// +// A non-nil result is always an absolute URL with a scheme of either HTTPS +// or HTTP. +func (h *Host) ServiceURL(id string) (*url.URL, error) { + svc, ver, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + urlStr, ok := h.services[id].(string) + if !ok { + // See if we have a matching service as that would indicate + // the service is supported, but not the requested version. + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + return nil, &ErrVersionNotSupported{ + hostname: h.hostname, + service: svc, + version: ver.Original(), + } + } + } + + // No discovered services match the requested service. + return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} + } + + u, err := h.parseURL(urlStr) + if err != nil { + return nil, fmt.Errorf("Failed to parse service URL: %v", err) + } + + return u, nil +} + +// ServiceOAuthClient returns the OAuth client configuration associated with the +// given service identifier, which should be of the form "servicename.vN". +// +// This is an alternative to ServiceURL for unusual services that require +// a full OAuth2 client definition rather than just a URL. Use this only +// for services whose specification calls for this sort of definition. +func (h *Host) ServiceOAuthClient(id string) (*OAuthClient, error) { + svc, ver, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + if _, ok := h.services[id]; !ok { + // See if we have a matching service as that would indicate + // the service is supported, but not the requested version. + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + return nil, &ErrVersionNotSupported{ + hostname: h.hostname, + service: svc, + version: ver.Original(), + } + } + } + + // No discovered services match the requested service. + return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} + } + + var raw map[string]interface{} + switch v := h.services[id].(type) { + case map[string]interface{}: + raw = v // Great! + case []map[string]interface{}: + // An absolutely infuriating legacy HCL ambiguity. + raw = v[0] + default: + // Debug message because raw Go types don't belong in our UI. 
+ log.Printf("[DEBUG] The definition for %s has Go type %T", id, h.services[id]) + return nil, fmt.Errorf("Service %s must be declared with an object value in the service discovery document", id) + } + + var grantTypes OAuthGrantTypeSet + if rawGTs, ok := raw["grant_types"]; ok { + if gts, ok := rawGTs.([]interface{}); ok { + var kws []string + for _, gtI := range gts { + gt, ok := gtI.(string) + if !ok { + // We'll ignore this so that we can potentially introduce + // other types into this array later if we need to. + continue + } + kws = append(kws, gt) + } + grantTypes = NewOAuthGrantTypeSet(kws...) + } else { + return nil, fmt.Errorf("Service %s is defined with invalid grant_types property: must be an array of grant type strings", id) + } + } else { + grantTypes = NewOAuthGrantTypeSet("authz_code") + } + + ret := &OAuthClient{ + SupportedGrantTypes: grantTypes, + } + if clientIDStr, ok := raw["client"].(string); ok { + ret.ID = clientIDStr + } else { + return nil, fmt.Errorf("Service %s definition is missing required property \"client\"", id) + } + if urlStr, ok := raw["authz"].(string); ok { + u, err := h.parseURL(urlStr) + if err != nil { + return nil, fmt.Errorf("Failed to parse authorization URL: %v", err) + } + ret.AuthorizationURL = u + } else { + if grantTypes.RequiresAuthorizationEndpoint() { + return nil, fmt.Errorf("Service %s definition is missing required property \"authz\"", id) + } + } + if urlStr, ok := raw["token"].(string); ok { + u, err := h.parseURL(urlStr) + if err != nil { + return nil, fmt.Errorf("Failed to parse token URL: %v", err) + } + ret.TokenURL = u + } else { + if grantTypes.RequiresTokenEndpoint() { + return nil, fmt.Errorf("Service %s definition is missing required property \"token\"", id) + } + } + if portsRaw, ok := raw["ports"].([]interface{}); ok { + if len(portsRaw) != 2 { + return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: must be a two-element array", id) + } + invalidPortsErr := fmt.Errorf("Invalid \"ports\" definition for service %s: both ports must be whole numbers between 1024 and 65535", id) + ports := make([]uint16, 2) + for i := range ports { + switch v := portsRaw[i].(type) { + case float64: + // JSON unmarshaling always produces float64. HCL 2 might, if + // an invalid fractional number were given. + if float64(uint16(v)) != v || v < 1024 { + return nil, invalidPortsErr + } + ports[i] = uint16(v) + case int: + // Legacy HCL produces int. HCL 2 will too, if the given number + // is a whole number. + if v < 1024 || v > 65535 { + return nil, invalidPortsErr + } + ports[i] = uint16(v) + default: + // Debug message because raw Go types don't belong in our UI. + log.Printf("[DEBUG] Port value %d has Go type %T", i, portsRaw[i]) + return nil, invalidPortsErr + } + } + if ports[1] < ports[0] { + return nil, fmt.Errorf("Invalid \"ports\" definition for service %s: minimum port cannot be greater than maximum port", id) + } + ret.MinPort = ports[0] + ret.MaxPort = ports[1] + } else { + // Default is to accept any port in the range, for a client that is + // able to call back to any localhost port. + ret.MinPort = 1024 + ret.MaxPort = 65535 + } + + return ret, nil +} + +func (h *Host) parseURL(urlStr string) (*url.URL, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + // Make relative URLs absolute using our discovery URL. 
+	if !u.IsAbs() {
+		u = h.discoURL.ResolveReference(u)
+	}
+
+	if u.Scheme != "https" && u.Scheme != "http" {
+		return nil, fmt.Errorf("unsupported scheme %s", u.Scheme)
+	}
+	if u.User != nil {
+		return nil, fmt.Errorf("embedded username/password information is not permitted")
+	}
+
+	// Fragment part is irrelevant, since we're not a browser.
+	u.Fragment = ""
+
+	return u, nil
+}
+
+// VersionConstraints returns the constraints for a given service identifier
+// (which should be of the form "servicename.vN") and product.
+//
+// When an exact (service and version) match is found, the constraints for
+// that service are returned.
+//
+// When the requested version is not provided but the service is, we will
+// search for all alternative versions. If multiple alternative versions
+// are found, the constraints of the latest available version are returned.
+//
+// When a service is not provided at all, an error will be returned instead.
+//
+// When checkpoint is disabled or when a 404 is returned after making the
+// HTTP call, an ErrNoVersionConstraints error will be returned.
+func (h *Host) VersionConstraints(id, product string) (*Constraints, error) {
+	svc, _, err := parseServiceID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return early if checkpoint is disabled.
+	if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
+		return nil, &ErrNoVersionConstraints{disabled: true}
+	}
+
+	// No services supported for an empty Host.
+	if h == nil || h.services == nil {
+		return nil, &ErrServiceNotProvided{service: svc}
+	}
+
+	// Try to get the service URL for the version service and
+	// return early if the service isn't provided by the host.
+	u, err := h.ServiceURL(versionServiceID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if we have an exact (service and version) match.
+	if _, ok := h.services[id].(string); !ok {
+		// If we don't have an exact match, we search for all matching
+		// services and then use the service ID of the latest version.
+		var services []string
+		for serviceID := range h.services {
+			if strings.HasPrefix(serviceID, svc+".") {
+				services = append(services, serviceID)
+			}
+		}
+
+		if len(services) == 0 {
+			// No discovered services match the requested service.
+			return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+		}
+
+		// Set id to the latest service ID we found.
+		var latest *version.Version
+		for _, serviceID := range services {
+			if _, ver, err := parseServiceID(serviceID); err == nil {
+				if latest == nil || latest.LessThan(ver) {
+					id = serviceID
+					latest = ver
+				}
+			}
+		}
+	}
+
+	// Set a default timeout of 1 sec for the versions request (in milliseconds)
+	timeout := 1000
+	if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
+		timeout = v
+	}
+
+	client := &http.Client{
+		Transport: h.transport,
+		Timeout:   time.Duration(timeout) * time.Millisecond,
+	}
+
+	// Prepare the service URL by setting the service and product.
+	v := u.Query()
+	v.Set("product", product)
+	u.Path += id
+	u.RawQuery = v.Encode()
+
+	// Create a new request.
+ req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to create version constraints request: %v", err) + } + req.Header.Set("Accept", "application/json") + + log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request version constraints: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return nil, &ErrNoVersionConstraints{disabled: false} + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) + } + + // Parse the constraints from the response body. + result := &Constraints{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, fmt.Errorf("Error parsing version constraints: %v", err) + } + + return result, nil +} + +func parseServiceID(id string) (string, *version.Version, error) { + parts := strings.SplitN(id, ".", 2) + if len(parts) != 2 { + return "", nil, fmt.Errorf("Invalid service ID format (i.e. service.vN): %s", id) + } + + version, err := version.NewVersion(parts[1]) + if err != nil { + return "", nil, fmt.Errorf("Invalid service version: %v", err) + } + + return parts[0], version, nil +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go new file mode 100644 index 000000000..7e4a38567 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/disco/http_transport.go @@ -0,0 +1,30 @@ +package disco + +import ( + "net/http" + + "github.com/hashicorp/go-cleanhttp" +) + +const DefaultUserAgent = "terraform-svchost/1.0" + +func defaultHttpTransport() http.RoundTripper { + t := cleanhttp.DefaultPooledTransport() + return &userAgentRoundTripper{ + innerRt: t, + userAgent: DefaultUserAgent, + } +} + +type userAgentRoundTripper struct { + innerRt http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + + return rt.innerRt.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go new file mode 100644 index 000000000..9308bbf72 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/disco/oauth_client.go @@ -0,0 +1,178 @@ +package disco + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/oauth2" +) + +// OAuthClient represents an OAuth client configuration, which is used for +// unusual services that require an entire OAuth client configuration as part +// of their service discovery, rather than just a URL. +type OAuthClient struct { + // ID is the identifier for the client, to be used as "client_id" in + // OAuth requests. + ID string + + // Authorization URL is the URL of the authorization endpoint that must + // be used for this OAuth client, as defined in the OAuth2 specifications. + // + // Not all grant types use the authorization endpoint, so it may be omitted + // if none of the grant types in SupportedGrantTypes require it. + AuthorizationURL *url.URL + + // Token URL is the URL of the token endpoint that must be used for this + // OAuth client, as defined in the OAuth2 specifications. 
+	//
+	// Not all grant types use the token endpoint, so it may be omitted
+	// if none of the grant types in SupportedGrantTypes require it.
+	TokenURL *url.URL
+
+	// MinPort and MaxPort define a range of TCP ports on localhost that this
+	// client is able to use as redirect_uri in an authorization request.
+	// Terraform will select a port from this range for the temporary HTTP
+	// server it creates to receive the authorization response, giving
+	// a URL like http://localhost:NNN/ where NNN is the selected port number.
+	//
+	// Terraform will reject any port numbers in this range less than 1024,
+	// to respect the common convention (enforced on some operating systems)
+	// that lower port numbers are reserved for "privileged" services.
+	MinPort, MaxPort uint16
+
+	// SupportedGrantTypes is a set of the grant types that the client may
+	// choose from. This includes an entry for each distinct type advertised
+	// by the server, even if a particular keyword is not supported by the
+	// current version of Terraform.
+	SupportedGrantTypes OAuthGrantTypeSet
+}
+
+// Endpoint returns an oauth2.Endpoint value ready to be used with the oauth2
+// library, representing the URLs from the receiver.
+func (c *OAuthClient) Endpoint() oauth2.Endpoint {
+	ep := oauth2.Endpoint{
+		// We don't actually auth because we're not a server-based OAuth client,
+		// so this instead just means that we include client_id as an argument
+		// in our requests.
+		AuthStyle: oauth2.AuthStyleInParams,
+	}
+
+	if c.AuthorizationURL != nil {
+		ep.AuthURL = c.AuthorizationURL.String()
+	}
+	if c.TokenURL != nil {
+		ep.TokenURL = c.TokenURL.String()
+	}
+
+	return ep
+}
+
+// OAuthGrantType is an enumeration of grant type strings that a host can
+// advertise support for.
+//
+// Values of this type don't necessarily match with a known constant of the
+// type, because they may represent grant type keywords defined in a later
+// version of Terraform which this version doesn't yet know about.
+type OAuthGrantType string
+
+const (
+	// OAuthAuthzCodeGrant represents an authorization code grant, as
+	// defined in IETF RFC 6749 section 4.1.
+	OAuthAuthzCodeGrant = OAuthGrantType("authz_code")
+
+	// OAuthOwnerPasswordGrant represents a resource owner password
+	// credentials grant, as defined in IETF RFC 6749 section 4.3.
+	OAuthOwnerPasswordGrant = OAuthGrantType("password")
+)
+
+// UsesAuthorizationEndpoint returns true if the receiving grant type makes
+// use of the authorization endpoint from the client configuration, and thus
+// if the authorization endpoint ought to be required.
+func (t OAuthGrantType) UsesAuthorizationEndpoint() bool {
+	switch t {
+	case OAuthAuthzCodeGrant:
+		return true
+	case OAuthOwnerPasswordGrant:
+		return false
+	default:
+		// We'll default to false so that we don't impose any requirements
+		// on any grant type keywords that might be defined for future
+		// versions of Terraform.
+		return false
+	}
+}
+
+// UsesTokenEndpoint returns true if the receiving grant type makes
+// use of the token endpoint from the client configuration, and thus
+// if the token endpoint ought to be required.
+func (t OAuthGrantType) UsesTokenEndpoint() bool {
+	switch t {
+	case OAuthAuthzCodeGrant:
+		return true
+	case OAuthOwnerPasswordGrant:
+		return true
+	default:
+		// We'll default to false so that we don't impose any requirements
+		// on any grant type keywords that might be defined for future
+		// versions of Terraform.
+ return false + } +} + +// OAuthGrantTypeSet represents a set of OAuthGrantType values. +type OAuthGrantTypeSet map[OAuthGrantType]struct{} + +// NewOAuthGrantTypeSet constructs a new grant type set from the given list +// of grant type keyword strings. Any duplicates in the list are ignored. +func NewOAuthGrantTypeSet(keywords ...string) OAuthGrantTypeSet { + ret := make(OAuthGrantTypeSet, len(keywords)) + for _, kw := range keywords { + ret[OAuthGrantType(kw)] = struct{}{} + } + return ret +} + +// Has returns true if the given grant type is in the receiving set. +func (s OAuthGrantTypeSet) Has(t OAuthGrantType) bool { + _, ok := s[t] + return ok +} + +// RequiresAuthorizationEndpoint returns true if any of the grant types in +// the set are known to require an authorization endpoint. +func (s OAuthGrantTypeSet) RequiresAuthorizationEndpoint() bool { + for t := range s { + if t.UsesAuthorizationEndpoint() { + return true + } + } + return false +} + +// RequiresTokenEndpoint returns true if any of the grant types in +// the set are known to require a token endpoint. +func (s OAuthGrantTypeSet) RequiresTokenEndpoint() bool { + for t := range s { + if t.UsesTokenEndpoint() { + return true + } + } + return false +} + +// GoString implements fmt.GoStringer. +func (s OAuthGrantTypeSet) GoString() string { + var buf strings.Builder + i := 0 + buf.WriteString("disco.NewOAuthGrantTypeSet(") + for t := range s { + if i > 0 { + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%q", string(t)) + i++ + } + buf.WriteString(")") + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.mod b/vendor/github.com/hashicorp/terraform-svchost/go.mod new file mode 100644 index 000000000..8f29e4ac5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/go.mod @@ -0,0 +1,12 @@ +module github.com/hashicorp/terraform-svchost + +go 1.12 + +require ( + github.com/google/go-cmp v0.3.1 + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-version v1.2.0 + github.com/zclconf/go-cty v1.1.0 + golang.org/x/net v0.0.0-20191009170851-d66e71096ffb + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 +) diff --git a/vendor/github.com/hashicorp/terraform-svchost/go.sum b/vendor/github.com/hashicorp/terraform-svchost/go.sum new file mode 100644 index 000000000..9ad1712f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/go.sum @@ -0,0 +1,36 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.1.0 h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb h1:TR699M2v0qoKTOHxeLgp6zPqaQNs74f01a/ob9W0qko= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/hashicorp/terraform-svchost/label_iter.go b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go new file mode 100644 index 000000000..af8ccbab2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/label_iter.go @@ -0,0 +1,69 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. 
+type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/svchost.go b/vendor/github.com/hashicorp/terraform-svchost/svchost.go new file mode 100644 index 000000000..4060b767e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-svchost/svchost.go @@ -0,0 +1,207 @@ +// Package svchost deals with the representations of the so-called "friendly +// hostnames" that we use to represent systems that provide Terraform-native +// remote services, such as module registry, remote operations, etc. +// +// Friendly hostnames are specified such that, as much as possible, they +// are consistent with how web browsers think of hostnames, so that users +// can bring their intuitions about how hostnames behave when they access +// a Terraform Enterprise instance's web UI (or indeed any other website) +// and have this behave in a similar way. +package svchost + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/net/idna" +) + +// Hostname is specialized name for string that indicates that the string +// has been converted to (or was already in) the storage and comparison form. +// +// Hostname values are not suitable for display in the user-interface. Use +// the ForDisplay method to obtain a form suitable for display in the UI. +// +// Unlike user-supplied hostnames, strings of type Hostname (assuming they +// were constructed by a function within this package) can be compared for +// equality using the standard Go == operator. +type Hostname string + +// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that +// a domain name label is in "punycode" form. +const acePrefix = "xn--" + +// displayProfile is a very liberal idna profile that we use to do +// normalization for display without imposing validation rules. +var displayProfile = idna.New( + idna.MapForLookup(), + idna.Transitional(true), +) + +// ForDisplay takes a user-specified hostname and returns a normalized form of +// it suitable for display in the UI. +// +// If the input is so invalid that no normalization can be performed then +// this will return the input, assuming that the caller still wants to +// display _something_. This function is, however, more tolerant than the +// other functions in this package and will make a best effort to prepare +// _any_ given hostname for display. 
+// +// For validation, use either IsValid (for explicit validation) or +// ForComparison (which implicitly validates, returning an error if invalid). +func ForDisplay(given string) string { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + portPortion, _ = normalizePortPortion(portPortion) + + ascii, err := displayProfile.ToASCII(given) + if err != nil { + return given + portPortion + } + display, err := displayProfile.ToUnicode(ascii) + if err != nil { + return given + portPortion + } + return display + portPortion +} + +// IsValid returns true if the given user-specified hostname is a valid +// service hostname. +// +// Validity is determined by complying with the RFC 5891 requirements for +// names that are valid for domain lookup (section 5), with the additional +// requirement that user-supplied forms must not _already_ contain +// Punycode segments. +func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. + labels := labelIter{orig: given} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + return Hostname(""), fmt.Errorf( + "hostname contains empty label (two consecutive periods)", + ) + } + if strings.HasPrefix(label, acePrefix) { + return Hostname(""), fmt.Errorf( + "hostname label %q specified in punycode format; service hostnames must be given in unicode", + label, + ) + } + } + + result, err := idna.Lookup.ToASCII(given) + if err != nil { + return Hostname(""), err + } + return Hostname(result + portPortion), nil +} + +// ForDisplay returns a version of the receiver that is appropriate for display +// in the UI. This includes converting any punycode labels to their +// corresponding Unicode characters. +// +// A round-trip through ForComparison and this ForDisplay method does not +// guarantee the same result as calling this package's top-level ForDisplay +// function, since a round-trip through the Hostname type implies stricter +// handling than we do when doing basic display-only processing. 
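A brief, hypothetical sketch of the round trip described above, using arbitrary example hostnames; the values in the comments describe what the rules above imply rather than captured output.

```go
package main

import (
	"fmt"
	"log"

	svchost "github.com/hashicorp/terraform-svchost"
)

func main() {
	// Normalize for storage and comparison: labels are lowercased via the
	// IDNA lookup profile and the default ":443" port portion is dropped.
	h, err := svchost.ForComparison("Registry.Example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h)              // comparison form, e.g. "registry.example.com"
	fmt.Println(h.ForDisplay()) // display form for the UI

	// Best-effort display normalization, keeping a non-default port.
	fmt.Println(svchost.ForDisplay("Registry.Example.com:8443"))

	// Labels given directly in punycode form are rejected by ForComparison,
	// so IsValid reports false for them.
	fmt.Println(svchost.IsValid("xn--caf-dma.example.com")) // false
}
```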
+func (h Hostname) ForDisplay() string { + given := string(h) + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + // We don't normalize the port portion here because we assume it's + // already been normalized on the way in. + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + // Should never happen, since type Hostname indicates that a string + // passed through our validation rules. + panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) + } + return result + portPortion +} + +func (h Hostname) String() string { + return string(h) +} + +func (h Hostname) GoString() string { + return fmt.Sprintf("svchost.Hostname(%q)", string(h)) +} + +// normalizePortPortion attempts to normalize the "port portion" of a hostname, +// which begins with the first colon in the hostname and should be followed +// by a string of decimal digits. +// +// If the port portion is valid, a normalized version of it is returned along +// with a nil error. +// +// If the port portion is invalid, the input string is returned verbatim along +// with a non-nil error. +// +// An empty string is a valid port portion representing the absence of a port. +// If non-empty, the first character must be a colon. +func normalizePortPortion(s string) (string, error) { + if s == "" { + return s, nil + } + + if s[0] != ':' { + // should never happen, since caller tends to guarantee the presence + // of a colon due to how it's extracted from the string. + return s, errors.New("port portion is missing its initial colon") + } + + numStr := s[1:] + num, err := strconv.Atoi(numStr) + if err != nil { + return s, errors.New("port portion contains non-digit characters") + } + if num == 443 { + return "", nil // ":443" is the default + } + if num > 65535 { + return s, errors.New("port number is greater than 65535") + } + return fmt.Sprintf(":%d", num), nil +} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 000000000..f0e5c79e1 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 000000000..d4db7fc99 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) 
+ if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 000000000..be6ebca9c --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. +func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// LocalAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go new file mode 100644 index 000000000..4f5293828 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -0,0 +1,157 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + // ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = fmt.Errorf("i/o deadline reached") + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // 
ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. 
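+	// May be sent with a data or window update message (flag 0x8 in spec.md).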
+ flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod new file mode 100644 index 000000000..672a0e581 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/yamux diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go new file mode 100644 index 000000000..18a078c8a --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -0,0 +1,98 @@ +package yamux + +import ( + "fmt" + "io" + "log" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. + AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. + EnableKeepAlive bool + + // KeepAliveInterval is how often to perform the keep alive + KeepAliveInterval time.Duration + + // ConnectionWriteTimeout is meant to be a "safety valve" timeout after + // we which will suspect a problem with the underlying connection and + // close it. This is only applied to writes, where's there's generally + // an expectation that things will move along quickly. + ConnectionWriteTimeout time.Duration + + // MaxStreamWindowSize is used to control the maximum + // window size that we allow for a stream. + MaxStreamWindowSize uint32 + + // LogOutput is used to control the log destination. Either Logger or + // LogOutput can be set, not both. + LogOutput io.Writer + + // Logger is used to pass in the logger to be used. Either Logger or + // LogOutput can be set, not both. 
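+	// If neither is set, VerifyConfig rejects the configuration.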
+ Logger *log.Logger +} + +// DefaultConfig is used to return a default configuration +func DefaultConfig() *Config { + return &Config{ + AcceptBacklog: 256, + EnableKeepAlive: true, + KeepAliveInterval: 30 * time.Second, + ConnectionWriteTimeout: 10 * time.Second, + MaxStreamWindowSize: initialStreamWindow, + LogOutput: os.Stderr, + } +} + +// VerifyConfig is used to verify the sanity of configuration +func VerifyConfig(config *Config) error { + if config.AcceptBacklog <= 0 { + return fmt.Errorf("backlog must be positive") + } + if config.KeepAliveInterval == 0 { + return fmt.Errorf("keep-alive interval must be positive") + } + if config.MaxStreamWindowSize < initialStreamWindow { + return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) + } + if config.LogOutput != nil && config.Logger != nil { + return fmt.Errorf("both Logger and LogOutput may not be set, select one") + } else if config.LogOutput == nil && config.Logger == nil { + return fmt.Errorf("one of Logger or LogOutput must be set, select one") + } + return nil +} + +// Server is used to initialize a new server-side connection. +// There must be at most one server-side connection. If a nil config is +// provided, the DefaultConfiguration will be used. +func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, false), nil +} + +// Client is used to initialize a new client-side connection. +// There must be at most one client-side connection. +func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, true), nil +} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go new file mode 100644 index 000000000..a80ddec35 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -0,0 +1,653 @@ +package yamux + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Session is used to wrap a reliable ordered connection and to +// multiplex it into multiple streams. +type Session struct { + // remoteGoAway indicates the remote side does + // not want futher connections. Must be first for alignment. + remoteGoAway int32 + + // localGoAway indicates that we should stop + // accepting futher connections. Must be first for alignment. + localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. + nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. 
This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + + s := &Session{ + config: config, + logger: logger, + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. 
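+// The returned net.Conn is backed by a yamux Stream; use AcceptStream to obtain the *Stream directly.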
+func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. + s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. 
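+// The timer comes from timerPool (util.go), so repeated sends reuse timers instead of allocating a new one each call.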
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := 
hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() + + // Check if this is a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- 
stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md new file mode 100644 index 000000000..183d797bd --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/spec.md @@ -0,0 +1,140 @@ +# Specification + +We use this document to detail the internal specification of Yamux. +This is used both as a guide for implementing Yamux, but also for +alternative interoperable libraries to be built. + +# Framing + +Yamux uses a streaming connection underneath, but imposes a message +framing so that it can be shared between many logical streams. Each +frame contains a header like: + +* Version (8 bits) +* Type (8 bits) +* Flags (16 bits) +* StreamID (32 bits) +* Length (32 bits) + +This means that each header has a 12 byte overhead. +All fields are encoded in network order (big endian). +Each field is described below: + +## Version Field + +The version field is used for future backward compatibility. At the +current time, the field is always set to 0, to indicate the initial +version. + +## Type Field + +The type field is used to switch the frame message type. The following +message types are supported: + +* 0x0 Data - Used to transmit data. May transmit zero length payloads + depending on the flags. + +* 0x1 Window Update - Used to updated the senders receive window size. + This is used to implement per-session flow control. + +* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat + and do keep-alives over TCP. + +* 0x3 Go Away - Used to close a session. + +## Flag Field + +The flags field is used to provide additional information related +to the message type. The following flags are supported: + +* 0x1 SYN - Signals the start of a new stream. May be sent with a data or + window update message. Also sent with a ping to indicate outbound. + +* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data + or window update message. Also sent with a ping to indicate response. + +* 0x4 FIN - Performs a half-close of a stream. May be sent with a data + message or window update. + +* 0x8 RST - Reset a stream immediately. May be sent with a data or + window update message. + +## StreamID Field + +The StreamID field is used to identify the logical stream the frame +is addressing. 
The client side should use odd ID's, and the server even. +This prevents any collisions. Additionally, the 0 ID is reserved to represent +the session. + +Both Ping and Go Away messages should always use the 0 StreamID. + +## Length Field + +The meaning of the length field depends on the message type: + +* Data - provides the length of bytes following the header +* Window update - provides a delta update to the window size +* Ping - Contains an opaque value, echoed back +* Go Away - Contains an error code + +# Message Flow + +There is no explicit connection setup, as Yamux relies on an underlying +transport to be provided. However, there is a distinction between client +and server side of the connection. + +## Opening a stream + +To open a stream, an initial data or window update frame is sent +with a new StreamID. The SYN flag should be set to signal a new stream. + +The receiver must then reply with either a data or window update frame +with the StreamID along with the ACK flag to accept the stream or with +the RST flag to reject the stream. + +Because we are relying on the reliable stream underneath, a connection +can begin sending data once the SYN flag is sent. The corresponding +ACK does not need to be received. This is particularly well suited +for an RPC system where a client wants to open a stream and immediately +fire a request without waiting for the RTT of the ACK. + +This does introduce the possibility of a connection being rejected +after data has been sent already. This is a slight semantic difference +from TCP, where the conection cannot be refused after it is opened. +Clients should be prepared to handle this by checking for an error +that indicates a RST was received. + +## Closing a stream + +To close a stream, either side sends a data or window update frame +along with the FIN flag. This does a half-close indicating the sender +will send no further data. + +Once both sides have closed the connection, the stream is closed. + +Alternatively, if an error occurs, the RST flag can be used to +hard close a stream immediately. + +## Flow Control + +When Yamux is initially starts each stream with a 256KB window size. +There is no window size for the session. + +To prevent the streams from stalling, window update frames should be +sent regularly. Yamux can be configured to provide a larger limit for +windows sizes. Both sides assume the initial 256KB window, but can +immediately send a window update as part of the SYN/ACK indicating a +larger window. + +Both sides should track the number of bytes sent in Data frames +only, as only they are tracked as part of the window size. + +## Session termination + +When a session is being terminated, the Go Away message should +be sent. The Length should be set to one of the following to +provide an error code: + +* 0x0 Normal termination +* 0x1 Protocol error +* 0x2 Internal error diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 000000000..aa2391973 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,470 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
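+// A Stream also satisfies net.Conn: it provides Read, Write, Close, the address methods from addr.go, and the deadline setters below.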
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
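+// Write (above) loops over write until the whole buffer has been sent or an error occurs.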
+func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if 
closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow -= length + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. 
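+// A zero value for t means Read will not time out.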
+func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline.Store(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline.Store(t) + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. +func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 000000000..8a73e9249 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 000000000..5091fb073 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 000000000..1f9807757 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 000000000..b03310a91 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 000000000..a828d2848 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... 
+ @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 000000000..187ef676d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 000000000..8e26ffeec --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
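+// Unlike the JMESPath.Search method above, this helper re-parses the expression on every call; use Compile when the same expression is evaluated repeatedly.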
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 000000000..1cd2d239c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 000000000..9b7cd89b4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + 
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 000000000..13c74604c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 000000000..817900c8f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 000000000..1240a1755 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 000000000..dae79cbdf --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 000000000..ddc1b7d7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore new file mode 100644 index 000000000..e43b0f988 --- /dev/null +++ b/vendor/github.com/joho/godotenv/.gitignore @@ -0,0 +1 @@ +.DS_Store diff --git a/vendor/github.com/joho/godotenv/.travis.yml b/vendor/github.com/joho/godotenv/.travis.yml new file mode 100644 index 000000000..f0db1adcd --- /dev/null +++ b/vendor/github.com/joho/godotenv/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.x + +os: + - linux + - osx diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE new file mode 100644 index 000000000..e7ddd51be --- /dev/null +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -0,0 +1,23 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md new file mode 100644 index 000000000..4e8fcf2e9 --- /dev/null +++ b/vendor/github.com/joho/godotenv/README.md @@ -0,0 +1,163 @@ +# GoDotEnv [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4?svg=true)](https://ci.appveyor.com/project/joho/godotenv) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) + +A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) + +From the original Library: + +> Storing configuration in the environment is one of the tenets of a twelve-factor app. 
Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. +> +> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. + +It can be used as a library (for loading in env for your own daemons etc) or as a bin command. + +There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows. + +## Installation + +As a library + +```shell +go get github.com/joho/godotenv +``` + +or if you want to use it as a bin command +```shell +go get github.com/joho/godotenv/cmd/godotenv +``` + +## Usage + +Add your application configuration to your `.env` file in the root of your project: + +```shell +S3_BUCKET=YOURS3BUCKET +SECRET_KEY=YOURSECRETKEYGOESHERE +``` + +Then in your Go app you can do something like + +```go +package main + +import ( + "github.com/joho/godotenv" + "log" + "os" +) + +func main() { + err := godotenv.Load() + if err != nil { + log.Fatal("Error loading .env file") + } + + s3Bucket := os.Getenv("S3_BUCKET") + secretKey := os.Getenv("SECRET_KEY") + + // now do something with s3 or whatever +} +``` + +If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import + +```go +import _ "github.com/joho/godotenv/autoload" +``` + +While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit + +```go +_ = godotenv.Load("somerandomfile") +_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") +``` + +If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) + +```shell +# I am a comment and that is OK +SOME_VAR=someval +FOO=BAR # comments at line end are OK too +export BAR=BAZ +``` + +Or finally you can do YAML(ish) style + +```yaml +FOO: bar +BAR: baz +``` + +as a final aside, if you don't want godotenv munging your env you can just get a map back instead + +```go +var myEnv map[string]string +myEnv, err := godotenv.Read() + +s3Bucket := myEnv["S3_BUCKET"] +``` + +... or from an `io.Reader` instead of a local file + +```go +reader := getRemoteFile() +myEnv, err := godotenv.Parse(reader) +``` + +... or from a `string` if you so desire + +```go +content := getRemoteFileContent() +myEnv, err := godotenv.Unmarshal(content) +``` + +### Command Mode + +Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` + +``` +godotenv -f /some/path/to/.env some_command with some args +``` + +If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` + +### Writing Env Files + +Godotenv can also write a map representing the environment to a correctly-formatted and escaped file + +```go +env, err := godotenv.Unmarshal("KEY=value") +err := godotenv.Write(env, "./.env") +``` + +... or to a string + +```go +env, err := godotenv.Unmarshal("KEY=value") +content, err := godotenv.Marshal(env) +``` + +## Contributing + +Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. + +*code changes without tests will not be accepted* + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. 
Commit your changes (`git commit -am 'Added some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Releases + +Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. + +Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` + +## CI + +Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) + +## Who? + +The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 000000000..29b436c77 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,346 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "sort" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (envMap map[string]string, err error) { + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + if err = scanner.Err(); err != nil { + return + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + var key, value string + key, value, err = parseLine(fullLine, envMap) + + if err != nil { + return + } + envMap[key] = value + } + } + return +} + +//Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return Parse(strings.NewReader(str)) +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file +func Write(envMap map[string]string, filename string) error { + content, error := Marshal(envMap) + if error != nil { + return error + } + file, error := os.Create(filename) + if error != nil { + return error + } + _, err := file.WriteString(content) + return err +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func parseLine(line string, envMap map[string]string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + firstEquals := strings.Index(line, "=") + firstColon := strings.Index(line, ":") + splitString := strings.SplitN(line, "=", 2) + if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { + //this is a yaml-style line + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = parseValue(splitString[1], envMap) + return +} + +func parseValue(value string, envMap map[string]string) string { + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values or possible escapes + if len(value) > 1 { + rs := regexp.MustCompile(`\A'(.*)'\z`) + singleQuotes := rs.FindStringSubmatch(value) + + rd := regexp.MustCompile(`\A"(.*)"\z`) + doubleQuotes := rd.FindStringSubmatch(value) + + if singleQuotes != nil || doubleQuotes != nil { + // pull the quotes off the edges + value = value[1 : len(value)-1] + } + + if doubleQuotes != nil { + // expand newlines + escapeRegex := regexp.MustCompile(`\\.`) + value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { + c := strings.TrimPrefix(match, `\`) + switch c { + case "n": + return "\n" + case "r": + return "\r" + default: + return match + } + }) + // unescape characters + e := regexp.MustCompile(`\\([^$])`) + value = e.ReplaceAllString(value, "$1") + } + + if singleQuotes == nil { + value = expandVariables(value, envMap) + } + } + + return value +} + +func expandVariables(v string, m map[string]string) string { + r := 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) + + return r.ReplaceAllStringFunc(v, func(s string) string { + submatch := r.FindStringSubmatch(s) + + if submatch == nil { + return s + } + if submatch[1] == "\\" || submatch[2] == "(" { + return submatch[0][1:] + } else if submatch[4] != "" { + return m[submatch[4]] + } + return s + }) +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 000000000..98db8f060 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md new file mode 100644 index 000000000..56729a92c --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/README.md @@ -0,0 +1,48 @@ +# go-colorable + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable) +[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable) + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) 
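
The godotenv parser vendored above (`parseValue` and `expandVariables`) strips surrounding quotes, expands `\n`/`\r` inside double-quoted values, and substitutes `${VAR}` references from keys parsed earlier in the same file, while single-quoted values stay literal. A minimal sketch of that behaviour using only the exported `Unmarshal` helper shown in the diff; the `HOST`/`LITERAL`/`DSN` keys and values are made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/joho/godotenv"
)

func main() {
	// Hypothetical .env content, illustrating quoting and expansion rules.
	content := `HOST=db.example.com
# single quotes keep the value literal
LITERAL='${HOST}'
# double quotes get \n expansion and ${VAR} substitution
DSN="postgres://${HOST}:5432\nretry=true"`

	env, err := godotenv.Unmarshal(content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env["LITERAL"]) // ${HOST}  (left untouched)
	fmt.Println(env["DSN"])     // postgres://db.example.com:5432, newline, retry=true
}
```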
+This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go new file mode 100644 index 000000000..1f28d773d --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go @@ -0,0 +1,29 @@ +// +build appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 000000000..887f203dc --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,30 @@ +// +build !windows +// +build !appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..404e10ca0 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,980 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provide colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. 
+func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 
209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = 
strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + 
procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
+ if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, 
+ {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 000000000..9d9f42485 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.5 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 000000000..2c12960ec --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..9721e16f4 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable hold writer but remove escape sequence. 
+type NonColorable struct { + out io.Writer +} + +// NewNonColorable return new instance of Writer which remove escape sequence from Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write write data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 000000000..5597e026d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
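
The go-colorable and go-isatty packages vendored here are typically used together: `colorable.NewColorable` wraps a terminal `*os.File` so ANSI sequences are translated on Windows, while `NewNonColorable` (the noncolorable.go file above) strips them when output is redirected. A small sketch of that pattern, assuming only that the caller wants an `io.Writer` for stdout; this is one common wiring, not the libraries' prescribed setup:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

// stdoutWriter picks a writer for os.Stdout: on a real terminal keep colors
// via NewColorable, on a Cygwin/MSYS2 pty pass ANSI through untouched, and
// otherwise strip escape sequences so redirected output stays clean.
func stdoutWriter() io.Writer {
	fd := os.Stdout.Fd()
	if isatty.IsTerminal(fd) {
		return colorable.NewColorable(os.Stdout)
	}
	if isatty.IsCygwinTerminal(fd) {
		// Cygwin/MSYS2 ptys are pipes that interpret ANSI themselves,
		// so no translation or stripping is needed here.
		return os.Stdout
	}
	return colorable.NewNonColorable(os.Stdout)
}

func main() {
	w := stdoutWriter()
	fmt.Fprintf(w, "\x1b[32mok\x1b[0m\n")
}
```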
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..1e69004bb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 000000000..f310320c3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 000000000..426c8973c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..07e93039d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 000000000..e004038ee --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..f02849c56 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..bdd5c79a0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..af51cbcaa --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. 
+// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml new file mode 100644 index 000000000..b8599b3ac --- /dev/null +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -0,0 +1,14 @@ +sudo: false + +language: go + +go: + - "1.8" + - "1.9" + - "1.10" + +branches: + only: + - master + +script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile new file mode 100644 index 000000000..4874b0082 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -0,0 +1,20 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code +test: + go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) + +# testrace runs the race checker +testrace: + go list $(TEST) | xargs -n1 go test -race $(TESTARGS) + +# updatedeps installs all the dependencies to run and build +updatedeps: + go list ./... \ + | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ + | grep -v github.com/mitchellh/cli \ + | xargs go get -f -u -v + +.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 000000000..8f02cdd0a --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,67 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) + +cli is a library for implementing powerful command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Serf](https://github.com/hashicorp/serf), +[Consul](https://github.com/hashicorp/consul), +[Vault](https://github.com/hashicorp/vault), +[Terraform](https://github.com/hashicorp/terraform), and +[Nomad](https://github.com/hashicorp/nomad). + +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + +* Automatic help generation for listing subcommands + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional, you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake. + +## Example + +Below is a simple example of creating and running a CLI + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 000000000..3bec6258f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. 
+// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. +type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 000000000..c2dbe55aa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,720 @@ +package cli + +import ( + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". 
See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. The factory may be called multiple times in the course + // of a command execution and certain events such as help require the + // instantiation of all commands. Expensive initialization should be + // deferred to function calls within the interface implementation. + Commands map[string]CommandFactory + + // HiddenCommands is a list of commands that are "hidden". Hidden + // commands are not given to the help function callback and do not + // show up in autocomplete. The values in the slice should be equivalent + // to the keys in the command map. + HiddenCommands []string + + // Name defines the name of the CLI. + Name string + + // Version of the CLI. + Version string + + // Autocomplete enables or disables subcommand auto-completion support. + // This is enabled by default when NewCLI is called. Otherwise, this + // must enabled explicitly. + // + // Autocomplete requires the "Name" option to be set on CLI. This name + // should be set exactly to the binary name that is autocompleted. + // + // Autocompletion is supported via the github.com/posener/complete + // library. This library supports bash, zsh and fish. To add support + // for other shells, please see that library. + // + // AutocompleteInstall and AutocompleteUninstall are the global flag + // names for installing and uninstalling the autocompletion handlers + // for the user's shell. The flag should omit the hyphen(s) in front of + // the value. Both single and double hyphens will automatically be supported + // for the flag name. These default to `autocomplete-install` and + // `autocomplete-uninstall` respectively. + // + // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- + // complete flags like -help and -version are added to the output. + // + // AutocompleteGlobalFlags are a mapping of global flags for + // autocompletion. The help and version flags are automatically added. + Autocomplete bool + AutocompleteInstall string + AutocompleteUninstall string + AutocompleteNoDefaultFlags bool + AutocompleteGlobalFlags complete.Flags + autocompleteInstaller autocompleteInstaller // For tests + + // HelpFunc and HelpWriter are used to output help information, if + // requested. + // + // HelpFunc is the function called to generate the generic help + // text that is shown if help must be shown for the CLI that doesn't + // pertain to a specific command. + // + // HelpWriter is the Writer where the help text is outputted to. If + // not specified, it will default to Stderr. + HelpFunc HelpFunc + HelpWriter io.Writer + + //--------------------------------------------------------------- + // Internal fields set automatically + + once sync.Once + autocomplete *complete.Complete + commandTree *radix.Tree + commandNested bool + commandHidden map[string]struct{} + subcommand string + subcommandArgs []string + topFlags []string + + // These are true when special global flags are set. We can/should + // probably use a bitset for this one day. + isHelp bool + isVersion bool + isAutocompleteInstall bool + isAutocompleteUninstall bool +} + +// NewClI returns a new CLI instance with sensible defaults. 
+func NewCLI(app, version string) *CLI { + return &CLI{ + Name: app, + Version: version, + HelpFunc: BasicHelpFunc(app), + Autocomplete: true, + } + +} + +// IsHelp returns whether or not the help flag is present within the +// arguments. +func (c *CLI) IsHelp() bool { + c.once.Do(c.init) + return c.isHelp +} + +// IsVersion returns whether or not the version flag is present within the +// arguments. +func (c *CLI) IsVersion() bool { + c.once.Do(c.init) + return c.isVersion +} + +// Run runs the actual CLI based on the arguments given. +func (c *CLI) Run() (int, error) { + c.once.Do(c.init) + + // If this is a autocompletion request, satisfy it. This must be called + // first before anything else since its possible to be autocompleting + // -help or -version or other flags and we want to show completions + // and not actually write the help or version. + if c.Autocomplete && c.autocomplete.Complete() { + return 0, nil + } + + // Just show the version and exit if instructed. + if c.IsVersion() && c.Version != "" { + c.HelpWriter.Write([]byte(c.Version + "\n")) + return 0, nil + } + + // Just print the help when only '-h' or '--help' is passed. + if c.IsHelp() && c.Subcommand() == "" { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) + return 0, nil + } + + // If we're attempting to install or uninstall autocomplete then handle + if c.Autocomplete { + // Autocomplete requires the "Name" to be set so that we know what + // command to setup the autocomplete on. + if c.Name == "" { + return 1, fmt.Errorf( + "internal error: CLI.Name must be specified for autocomplete to work") + } + + // If both install and uninstall flags are specified, then error + if c.isAutocompleteInstall && c.isAutocompleteUninstall { + return 1, fmt.Errorf( + "Either the autocomplete install or uninstall flag may " + + "be specified, but not both.") + } + + // If the install flag is specified, perform the install or uninstall + if c.isAutocompleteInstall { + if err := c.autocompleteInstaller.Install(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + if c.isAutocompleteUninstall { + if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + } + + // Attempt to get the factory function for creating the command + // implementation. If the command is invalid or blank, it is an error. + raw, ok := c.commandTree.Get(c.Subcommand()) + if !ok { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) + return 127, nil + } + + command, err := raw.(CommandFactory)() + if err != nil { + return 1, err + } + + // If we've been instructed to just print the help, then print it + if c.IsHelp() { + c.commandHelp(command) + return 0, nil + } + + // If there is an invalid flag, then error + if len(c.topFlags) > 0 { + c.HelpWriter.Write([]byte( + "Invalid flags before the subcommand. If these flags are for\n" + + "the subcommand, please put them after the subcommand.\n\n")) + c.commandHelp(command) + return 1, nil + } + + code := command.Run(c.SubcommandArgs()) + if code == RunResultHelp { + // Requesting help + c.commandHelp(command) + return 1, nil + } + + return code, nil +} + +// Subcommand returns the subcommand that the CLI would execute. For +// example, a CLI from "--version version --help" would return a Subcommand +// of "version" +func (c *CLI) Subcommand() string { + c.once.Do(c.init) + return c.subcommand +} + +// SubcommandArgs returns the arguments that will be passed to the +// subcommand. 
+func (c *CLI) SubcommandArgs() []string { + c.once.Do(c.init) + return c.subcommandArgs +} + +// subcommandParent returns the parent of this subcommand, if there is one. +// If there isn't on, "" is returned. +func (c *CLI) subcommandParent() string { + // Get the subcommand, if it is "" alread just return + sub := c.Subcommand() + if sub == "" { + return sub + } + + // Clear any trailing spaces and find the last space + sub = strings.TrimRight(sub, " ") + idx := strings.LastIndex(sub, " ") + + if idx == -1 { + // No space means our parent is root + return "" + } + + return sub[:idx] +} + +func (c *CLI) init() { + if c.HelpFunc == nil { + c.HelpFunc = BasicHelpFunc("app") + + if c.Name != "" { + c.HelpFunc = BasicHelpFunc(c.Name) + } + } + + if c.HelpWriter == nil { + c.HelpWriter = os.Stderr + } + + // Build our hidden commands + if len(c.HiddenCommands) > 0 { + c.commandHidden = make(map[string]struct{}) + for _, h := range c.HiddenCommands { + c.commandHidden[h] = struct{}{} + } + } + + // Build our command tree + c.commandTree = radix.New() + c.commandNested = false + for k, v := range c.Commands { + k = strings.TrimSpace(k) + c.commandTree.Insert(k, v) + if strings.ContainsRune(k, ' ') { + c.commandNested = true + } + } + + // Go through the key and fill in any missing parent commands + if c.commandNested { + var walkFn radix.WalkFn + toInsert := make(map[string]struct{}) + walkFn = func(k string, raw interface{}) bool { + idx := strings.LastIndex(k, " ") + if idx == -1 { + // If there is no space, just ignore top level commands + return false + } + + // Trim up to that space so we can get the expected parent + k = k[:idx] + if _, ok := c.commandTree.Get(k); ok { + // Yay we have the parent! + return false + } + + // We're missing the parent, so let's insert this + toInsert[k] = struct{}{} + + // Call the walk function recursively so we check this one too + return walkFn(k, nil) + } + + // Walk! + c.commandTree.Walk(walkFn) + + // Insert any that we're missing + for k := range toInsert { + var f CommandFactory = func() (Command, error) { + return &MockCommand{ + HelpText: "This command is accessed by using one of the subcommands below.", + RunResult: RunResultHelp, + }, nil + } + + c.commandTree.Insert(k, f) + } + } + + // Setup autocomplete if we have it enabled. We have to do this after + // the command tree is setup so we can use the radix tree to easily find + // all subcommands. + if c.Autocomplete { + c.initAutocomplete() + } + + // Process the args + c.processArgs() +} + +func (c *CLI) initAutocomplete() { + if c.AutocompleteInstall == "" { + c.AutocompleteInstall = defaultAutocompleteInstall + } + + if c.AutocompleteUninstall == "" { + c.AutocompleteUninstall = defaultAutocompleteUninstall + } + + if c.autocompleteInstaller == nil { + c.autocompleteInstaller = &realAutocompleteInstaller{} + } + + // Build the root command + cmd := c.initAutocompleteSub("") + + // For the root, we add the global flags to the "Flags". This way + // they don't show up on every command. + if !c.AutocompleteNoDefaultFlags { + cmd.Flags = map[string]complete.Predictor{ + "-" + c.AutocompleteInstall: complete.PredictNothing, + "-" + c.AutocompleteUninstall: complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, + } + } + cmd.GlobalFlags = c.AutocompleteGlobalFlags + + c.autocomplete = complete.New(c.Name, cmd) +} + +// initAutocompleteSub creates the complete.Command for a subcommand with +// the given prefix. 
This will continue recursively for all subcommands. +// The prefix "" (empty string) can be used for the root command. +func (c *CLI) initAutocompleteSub(prefix string) complete.Command { + var cmd complete.Command + walkFn := func(k string, raw interface{}) bool { + // Ignore the empty key which can be present for default commands. + if k == "" { + return false + } + + // Keep track of the full key so that we can nest further if necessary + fullKey := k + + if len(prefix) > 0 { + // If we have a prefix, trim the prefix + 1 (for the space) + // Example: turns "sub one" to "one" with prefix "sub" + k = k[len(prefix)+1:] + } + + if idx := strings.Index(k, " "); idx >= 0 { + // If there is a space, we trim up to the space. This turns + // "sub sub2 sub3" into "sub". The prefix trim above will + // trim our current depth properly. + k = k[:idx] + } + + if _, ok := cmd.Sub[k]; ok { + // If we already tracked this subcommand then ignore + return false + } + + // If the command is hidden, don't record it at all + if _, ok := c.commandHidden[fullKey]; ok { + return false + } + + if cmd.Sub == nil { + cmd.Sub = complete.Commands(make(map[string]complete.Command)) + } + subCmd := c.initAutocompleteSub(fullKey) + + // Instantiate the command so that we can check if the command is + // a CommandAutocomplete implementation. If there is an error + // creating the command, we just ignore it since that will be caught + // later. + impl, err := raw.(CommandFactory)() + if err != nil { + impl = nil + } + + // Check if it implements ComandAutocomplete. If so, setup the autocomplete + if c, ok := impl.(CommandAutocomplete); ok { + subCmd.Args = c.AutocompleteArgs() + subCmd.Flags = c.AutocompleteFlags() + } + + cmd.Sub[k] = subCmd + return false + } + + walkPrefix := prefix + if walkPrefix != "" { + walkPrefix += " " + } + + c.commandTree.WalkPrefix(walkPrefix, walkFn) + return cmd +} + +func (c *CLI) commandHelp(command Command) { + // Get the template to use + tpl := strings.TrimSpace(defaultHelpTemplate) + if t, ok := command.(CommandHelpTemplate); ok { + tpl = t.HelpTemplate() + } + if !strings.HasSuffix(tpl, "\n") { + tpl += "\n" + } + + // Parse it + t, err := template.New("root").Parse(tpl) + if err != nil { + t = template.Must(template.New("root").Parse(fmt.Sprintf( + "Internal error! 
Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(c.HelpWriter, data) + if err == nil { + return + } + + // An error, just output... + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. + if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... 
+ c.topFlags = append(c.topFlags, arg) + } + } + + // If we didn't find a subcommand yet and this is the first non-flag + // argument, then this is our subcommand. + if c.subcommand == "" && arg != "" && arg[0] != '-' { + c.subcommand = arg + if c.commandNested { + // If the command has a space in it, then it is invalid. + // Set a blank command so that it fails. + if strings.ContainsRune(arg, ' ') { + c.subcommand = "" + return + } + + // Determine the argument we look to to end subcommands. + // We look at all arguments until one has a space. This + // disallows commands like: ./cli foo "bar baz". An argument + // with a space is always an argument. + j := 0 + for k, v := range c.Args[i:] { + if strings.ContainsRune(v, ' ') { + break + } + + j = i + k + 1 + } + + // Nested CLI, the subcommand is actually the entire + // arg list up to a flag that is still a valid subcommand. + searchKey := strings.Join(c.Args[i:j], " ") + k, _, ok := c.commandTree.LongestPrefix(searchKey) + if ok { + // k could be a prefix that doesn't contain the full + // command such as "foo" instead of "foobar", so we + // need to verify that we have an entire key. To do that, + // we look for an ending in a space or an end of string. + reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) + if reVerify.MatchString(searchKey) { + c.subcommand = k + i += strings.Count(k, " ") + } + } + } + + // The remaining args the subcommand arguments + c.subcommandArgs = c.Args[i+1:] + } + } + + // If we never found a subcommand and support a default command, then + // switch to using that. + if c.subcommand == "" { + if _, ok := c.Commands[""]; ok { + args := c.topFlags + args = append(args, c.subcommandArgs...) + c.topFlags = nil + c.subcommandArgs = args + } + } +} + +// defaultAutocompleteInstall and defaultAutocompleteUninstall are the +// default values for the autocomplete install and uninstall flags. +const defaultAutocompleteInstall = "autocomplete-install" +const defaultAutocompleteUninstall = "autocomplete-uninstall" + +const defaultHelpTemplate = ` +{{.Help}}{{if gt (len .Subcommands) 0}} + +Subcommands: +{{- range $value := .Subcommands }} + {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} +{{- end }} +` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go new file mode 100644 index 000000000..bed11faf5 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command.go @@ -0,0 +1,67 @@ +package cli + +import ( + "github.com/posener/complete" +) + +const ( + // RunResultHelp is a value that can be returned from Run to signal + // to the CLI to render the help output. + RunResultHelp = -18511 +) + +// A command is a runnable sub-command of a CLI. +type Command interface { + // Help should return long-form help text that includes the command-line + // usage, a brief few sentences explaining the function of the command, + // and the complete list of flags the command accepts. + Help() string + + // Run should run the actual command with the given CLI instance and + // command-line arguments. It should return the exit status when it is + // finished. + // + // There are a handful of special exit codes this can return documented + // above that change behavior. + Run(args []string) int + + // Synopsis should return a one-line, short synopsis of the command. + // This should be less than 50 characters ideally. + Synopsis() string +} + +// CommandAutocomplete is an extension of Command that enables fine-grained +// autocompletion. 
Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 000000000..7a584b7e9 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. 
+type MockCommandHelpTemplate struct { + MockCommand + + // Settable + HelpTemplateText string +} + +func (c *MockCommandHelpTemplate) HelpTemplate() string { + return c.HelpTemplateText +} diff --git a/vendor/github.com/mitchellh/cli/go.mod b/vendor/github.com/mitchellh/cli/go.mod new file mode 100644 index 000000000..675325ffa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/go.mod @@ -0,0 +1,12 @@ +module github.com/mitchellh/cli + +require ( + github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 + github.com/bgentry/speakeasy v0.1.0 + github.com/fatih/color v1.7.0 + github.com/hashicorp/go-multierror v1.0.0 // indirect + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.3 + github.com/posener/complete v1.1.1 + golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc // indirect +) diff --git a/vendor/github.com/mitchellh/cli/go.sum b/vendor/github.com/mitchellh/cli/go.sum new file mode 100644 index 000000000..037087523 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/go.sum @@ -0,0 +1,22 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357 h1:Rem2+U35z1QtPQc6r+WolF7yXiefXqDKyk+lN2pE164= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0 h1:j30noezaCfvNLcdMYSvHLv81DxYRSt1grlpseG67vhU= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go new file mode 100644 index 000000000..f5ca58f59 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/help.go @@ -0,0 +1,79 @@ +package cli + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +// HelpFunc is the type of the function that is 
responsible for generating +// the help output when the CLI must show the general help text. +type HelpFunc func(map[string]CommandFactory) string + +// BasicHelpFunc generates some basic help output that is usually good enough +// for most CLI applications. +func BasicHelpFunc(app string) HelpFunc { + return func(commands map[string]CommandFactory) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "Usage: %s [--version] [--help] []\n\n", + app)) + buf.WriteString("Available commands are:\n") + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. + keys := make([]string, 0, len(commands)) + maxKeyLen := 0 + for key := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() + } +} + +// FilteredHelpFunc will filter the commands to only include the keys +// in the include parameter. +func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 000000000..a2d6f94f4 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe. 
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. +type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 000000000..b0ec44840 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,73 @@ +package cli + +import ( + "github.com/fatih/color" +) + +const ( + noColor = -1 +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. 
+var ( + UiColorNone UiColor = UiColor{noColor, false} + UiColorRed = UiColor{int(color.FgHiRed), false} + UiColorGreen = UiColor{int(color.FgHiGreen), false} + UiColorYellow = UiColor{int(color.FgHiYellow), false} + UiColorBlue = UiColor{int(color.FgHiBlue), false} + UiColorMagenta = UiColor{int(color.FgHiMagenta), false} + UiColorCyan = UiColor{int(color.FgHiCyan), false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColoredUi struct { + OutputColor UiColor + InfoColor UiColor + ErrorColor UiColor + WarnColor UiColor + Ui Ui +} + +func (u *ColoredUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColoredUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColoredUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColoredUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColoredUi) colorize(message string, uc UiColor) string { + if uc.Code == noColor { + return message + } + + attr := []color.Attribute{color.Attribute(uc.Code)} + if uc.Bold { + attr = append(attr, color.Bold) + } + + return color.New(attr...).SprintFunc()(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go new file mode 100644 index 000000000..b4f4dbfaa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go @@ -0,0 +1,54 @@ +package cli + +import ( + "sync" +) + +// ConcurrentUi is a wrapper around a Ui interface (and implements that +// interface) making the underlying Ui concurrency safe. +type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 000000000..0bfe0a191 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,111 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// NewMockUi returns a fully initialized MockUi instance +// which is safe for concurrent use. +func NewMockUi() *MockUi { + m := new(MockUi) + m.once.Do(m.init) + return m +} + +// MockUi is a mock UI that is used for tests and is exported publicly +// for use in external tests if needed as well. Do not instantite this +// directly since the buffers will be initialized on the first write. If +// there is no write then you will get a nil panic. Please use the +// NewMockUi() constructor function instead. 
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 000000000..1e1db3cf6 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml new file mode 100644 index 000000000..74e286ae1 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/colorstring/LICENSE b/vendor/github.com/mitchellh/colorstring/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/colorstring/README.md b/vendor/github.com/mitchellh/colorstring/README.md new file mode 100644 index 000000000..0654d454d --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/README.md @@ -0,0 +1,30 @@ +# colorstring [![Build Status](https://travis-ci.org/mitchellh/colorstring.svg)](https://travis-ci.org/mitchellh/colorstring) + +colorstring is a [Go](http://www.golang.org) library for outputting colored +strings to a console using a simple inline syntax in your string to specify +the color to print as. + +For example, the string `[blue]hello [red]world` would output the text +"hello world" in two colors. The API of colorstring allows for easily disabling +colors, adding aliases, etc. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/colorstring +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/colorstring). + +Usage is easy enough: + +```go +colorstring.Println("[blue]Hello [red]World!") +``` + +Additionally, the `Colorize` struct can be used to set options such as +custom colors, color disabling, etc. diff --git a/vendor/github.com/mitchellh/colorstring/colorstring.go b/vendor/github.com/mitchellh/colorstring/colorstring.go new file mode 100644 index 000000000..3de5b241d --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/colorstring.go @@ -0,0 +1,244 @@ +// colorstring provides functions for colorizing strings for terminal +// output. +package colorstring + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// Color colorizes your strings using the default settings. +// +// Strings given to Color should use the syntax `[color]` to specify the +// color for text following. For example: `[blue]Hello` will return "Hello" +// in blue. See DefaultColors for all the supported colors and attributes. +// +// If an unrecognized color is given, it is ignored and assumed to be part +// of the string. For example: `[hi]world` will result in "[hi]world". +// +// A color reset is appended to the end of every string. This will reset +// the color of following strings when you output this text to the same +// terminal session. +// +// If you want to customize any of this behavior, use the Colorize struct. +func Color(v string) string { + return def.Color(v) +} + +// ColorPrefix returns the color sequence that prefixes the given text. +// +// This is useful when wrapping text if you want to inherit the color +// of the wrapped text. For example, "[green]foo" will return "[green]". +// If there is no color sequence, then this will return "". +func ColorPrefix(v string) string { + return def.ColorPrefix(v) +} + +// Colorize colorizes your strings, giving you the ability to customize +// some of the colorization process. +// +// The options in Colorize can be set to customize colorization. If you're +// only interested in the defaults, just use the top Color function directly, +// which creates a default Colorize. 
+type Colorize struct { + // Colors maps a color string to the code for that color. The code + // is a string so that you can use more complex colors to set foreground, + // background, attributes, etc. For example, "boldblue" might be + // "1;34" + Colors map[string]string + + // If true, color attributes will be ignored. This is useful if you're + // outputting to a location that doesn't support colors and you just + // want the strings returned. + Disable bool + + // Reset, if true, will reset the color after each colorization by + // adding a reset code at the end. + Reset bool +} + +// Color colorizes a string according to the settings setup in the struct. +// +// For more details on the syntax, see the top-level Color function. +func (c *Colorize) Color(v string) string { + matches := parseRe.FindAllStringIndex(v, -1) + if len(matches) == 0 { + return v + } + + result := new(bytes.Buffer) + colored := false + m := []int{0, 0} + for _, nm := range matches { + // Write the text in between this match and the last + result.WriteString(v[m[1]:nm[0]]) + m = nm + + var replace string + if code, ok := c.Colors[v[m[0]+1:m[1]-1]]; ok { + colored = true + + if !c.Disable { + replace = fmt.Sprintf("\033[%sm", code) + } + } else { + replace = v[m[0]:m[1]] + } + + result.WriteString(replace) + } + result.WriteString(v[m[1]:]) + + if colored && c.Reset && !c.Disable { + // Write the clear byte at the end + result.WriteString("\033[0m") + } + + return result.String() +} + +// ColorPrefix returns the first color sequence that exists in this string. +// +// For example: "[green]foo" would return "[green]". If no color sequence +// exists, then "" is returned. This is especially useful when wrapping +// colored texts to inherit the color of the wrapped text. +func (c *Colorize) ColorPrefix(v string) string { + return prefixRe.FindString(strings.TrimSpace(v)) +} + +// DefaultColors are the default colors used when colorizing. +// +// If the color is surrounded in underscores, such as "_blue_", then that +// color will be used for the background color. +var DefaultColors map[string]string + +func init() { + DefaultColors = map[string]string{ + // Default foreground/background colors + "default": "39", + "_default_": "49", + + // Foreground colors + "black": "30", + "red": "31", + "green": "32", + "yellow": "33", + "blue": "34", + "magenta": "35", + "cyan": "36", + "light_gray": "37", + "dark_gray": "90", + "light_red": "91", + "light_green": "92", + "light_yellow": "93", + "light_blue": "94", + "light_magenta": "95", + "light_cyan": "96", + "white": "97", + + // Background colors + "_black_": "40", + "_red_": "41", + "_green_": "42", + "_yellow_": "43", + "_blue_": "44", + "_magenta_": "45", + "_cyan_": "46", + "_light_gray_": "47", + "_dark_gray_": "100", + "_light_red_": "101", + "_light_green_": "102", + "_light_yellow_": "103", + "_light_blue_": "104", + "_light_magenta_": "105", + "_light_cyan_": "106", + "_white_": "107", + + // Attributes + "bold": "1", + "dim": "2", + "underline": "4", + "blink_slow": "5", + "blink_fast": "6", + "invert": "7", + "hidden": "8", + + // Reset to reset everything to their defaults + "reset": "0", + "reset_bold": "21", + } + + def = Colorize{ + Colors: DefaultColors, + Reset: true, + } +} + +var def Colorize +var parseReRaw = `\[[a-z0-9_-]+\]` +var parseRe = regexp.MustCompile(`(?i)` + parseReRaw) +var prefixRe = regexp.MustCompile(`^(?i)(` + parseReRaw + `)+`) + +// Print is a convenience wrapper for fmt.Print with support for color codes. 
+// +// Print formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are added between +// operands when neither is a string. It returns the number of bytes written +// and any write error encountered. +func Print(a string) (n int, err error) { + return fmt.Print(Color(a)) +} + +// Println is a convenience wrapper for fmt.Println with support for color +// codes. +// +// Println formats using the default formats for its operands and writes to +// standard output with support for color codes. Spaces are always added +// between operands and a newline is appended. It returns the number of bytes +// written and any write error encountered. +func Println(a string) (n int, err error) { + return fmt.Println(Color(a)) +} + +// Printf is a convenience wrapper for fmt.Printf with support for color codes. +// +// Printf formats according to a format specifier and writes to standard output +// with support for color codes. It returns the number of bytes written and any +// write error encountered. +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(Color(format), a...) +} + +// Fprint is a convenience wrapper for fmt.Fprint with support for color codes. +// +// Fprint formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are added between operands when neither +// is a string. It returns the number of bytes written and any write error +// encountered. +func Fprint(w io.Writer, a string) (n int, err error) { + return fmt.Fprint(w, Color(a)) +} + +// Fprintln is a convenience wrapper for fmt.Fprintln with support for color +// codes. +// +// Fprintln formats using the default formats for its operands and writes to w +// with support for color codes. Spaces are always added between operands and a +// newline is appended. It returns the number of bytes written and any write +// error encountered. +func Fprintln(w io.Writer, a string) (n int, err error) { + return fmt.Fprintln(w, Color(a)) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf with support for color +// codes. +// +// Fprintf formats according to a format specifier and writes to w with support +// for color codes. It returns the number of bytes written and any write error +// encountered. +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, Color(format), a...) 
+} diff --git a/vendor/github.com/mitchellh/colorstring/go.mod b/vendor/github.com/mitchellh/colorstring/go.mod new file mode 100644 index 000000000..446ff8d30 --- /dev/null +++ b/vendor/github.com/mitchellh/colorstring/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/colorstring diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 000000000..d7b9589ab --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 000000000..f0fbd2e5c --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 000000000..db6a6aa1a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. 
+ return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 000000000..140435255 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,548 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. +var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. 
Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type + + // any locks we've taken, indexed by depth + locks []sync.Locker + // take locks while walking the structure + useLocks bool +} + +func (w *walker) Enter(l reflectwalk.Location) error { + w.depth++ + + // ensure we have enough elements to index via w.depth + for w.depth >= len(w.locks) { + w.locks = append(w.locks, nil) + } + + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + + return nil +} + +func (w *walker) Exit(l reflectwalk.Location) error { + locker := w.locks[w.depth] + w.locks[w.depth] = nil + if locker != nil { + defer locker.Unlock() + } + + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + + w.depth-- + if w.ignoreDepth > w.depth { + w.ignoreDepth = 0 + } + + if w.ignoring() { + return nil + } + + switch l { + case reflectwalk.Array: + fallthrough + case reflectwalk.Map: + fallthrough + case reflectwalk.Slice: + w.replacePointerMaybe() + + // Pop map off our container + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + // Pop off the key and value + mv := w.valPop() + mk := w.valPop() + m := w.cs[len(w.cs)-1] + + // If mv is the zero value, SetMapIndex deletes the key form the map, + // or in this case never adds it. We need to create a properly typed + // zero value so that this key can be set. + if !mv.IsValid() { + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } + } + case reflectwalk.SliceElem: + // Pop off the value and the index and set it on the slice + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + se := s.Elem().Index(i) + if se.CanSet() { + se.Set(v) + } + } + case reflectwalk.Struct: + w.replacePointerMaybe() + + // Remove the struct from the container stack + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.StructField: + // Pop off the value and the field + v := w.valPop() + f := w.valPop().Interface().(reflect.StructField) + if v.IsValid() { + s := w.cs[len(w.cs)-1] + sf := reflect.Indirect(s).FieldByName(f.Name) + + if sf.CanSet() { + sf.Set(v) + } + } + case reflectwalk.WalkLoc: + // Clear out the slices for GC + w.cs = nil + w.vals = nil + } + + return nil +} + +func (w *walker) Map(m reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(m) + + // Create the map. 
If the map itself is nil, then just make a nil map + var newMap reflect.Value + if m.IsNil() { + newMap = reflect.New(m.Type()) + } else { + newMap = wrapPtr(reflect.MakeMap(m.Type())) + } + + w.cs = append(w.cs, newMap) + w.valPush(newMap) + return nil +} + +func (w *walker) MapElem(m, k, v reflect.Value) error { + return nil +} + +func (w *walker) PointerEnter(v bool) error { + if v { + w.ps[w.depth]++ + } + return nil +} + +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := Copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. 
+ if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + // Push the field onto the stack, we'll handle it when we exit + // the struct field in Exit... + w.valPush(reflect.ValueOf(f)) + return nil +} + +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + +func (w *walker) ignoring() bool { + return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth +} + +func (w *walker) pointerPeek() bool { + return w.ps[w.depth] > 0 +} + +func (w *walker) valPop() reflect.Value { + result := w.vals[len(w.vals)-1] + w.vals = w.vals[:len(w.vals)-1] + + // If we're out of values, that means we popped everything off. In + // this case, we reset the result so the next pushed value becomes + // the result. + if len(w.vals) == 0 { + w.Result = nil + } + + return result +} + +func (w *walker) valPush(v reflect.Value) { + w.vals = append(w.vals, v) + + // If we haven't set the result yet, then this is the result since + // it is the first (outermost) value we're seeing. + if w.Result == nil && v.IsValid() { + w.Result = v.Interface() + } +} + +func (w *walker) replacePointerMaybe() { + // Determine the last pointer value. If it is NOT a pointer, then + // we need to push that onto the stack. + if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. 
+ if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/copystructure/go.mod b/vendor/github.com/mitchellh/copystructure/go.mod new file mode 100644 index 000000000..d01864309 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.mod @@ -0,0 +1,3 @@ +module github.com/mitchellh/copystructure + +require github.com/mitchellh/reflectwalk v1.0.0 diff --git a/vendor/github.com/mitchellh/copystructure/go.sum b/vendor/github.com/mitchellh/copystructure/go.sum new file mode 100644 index 000000000..be5724561 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 000000000..d70706d5b --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. 
+ +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 000000000..7efa09a04 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 000000000..25378537e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . 
-read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 000000000..928d000ec --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 000000000..a3866a291 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
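Before the go-testing-interface files continue below, a minimal usage sketch for the go-homedir package vendored just above. This is illustrative only and not part of the upstream sources; the `~/.terraformrc` path is merely a placeholder.

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Dir detects the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand replaces a leading "~" with that directory.
	// The path used here is only a placeholder.
	rc, err := homedir.Expand("~/.terraformrc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", rc)
}
```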
diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 000000000..26781bbae --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! + +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod new file mode 100644 index 000000000..062796de7 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-testing-interface diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 000000000..204afb420 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 000000000..31b42cadf --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 000000000..60ae31170 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
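As a quick illustration of the naive wrapping behaviour described in the README above (a sketch only, not part of the vendored files): breaks are inserted at whitespace, and a single word longer than the limit is left intact and overruns it.

```go
package main

import (
	"fmt"

	wordwrap "github.com/mitchellh/go-wordwrap"
)

func main() {
	// Normal case: breaks are inserted at whitespace, so no line
	// exceeds the 10-character limit.
	fmt.Println(wordwrap.WrapString("the quick brown fox", 10))

	// Pathological case noted in the README: a single word longer
	// than the limit is not split, so it reaches past the limit.
	fmt.Println(wordwrap.WrapString("supercalifragilistic foo", 5))
}
```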
diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod new file mode 100644 index 000000000..2ae411b20 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-wordwrap diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 000000000..ac67205bc --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..1689c7d73 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - "1.11.x" + - tip + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 000000000..3b3cb723f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,21 @@ +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. 
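The CHANGELOG entry above mentions `StringToIPHookFunc`; the sketch below shows how such a decode hook is typically wired through a `DecoderConfig`. It is illustrative only: the `Result` field and the `NewDecoder`/`Decode` calls are assumed from the upstream mapstructure API rather than taken from the excerpt reproduced here.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/mitchellh/mapstructure"
)

// Listener is a hypothetical target struct for this sketch.
type Listener struct {
	Addr net.IP
	Port int
}

func main() {
	input := map[string]interface{}{
		"addr": "10.0.0.1",
		"port": 8080,
	}

	var out Listener
	// Result and NewDecoder are assumed from the upstream API; only
	// part of the vendored mapstructure.go is reproduced above.
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToIPHookFunc(),
		Result:     &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
```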
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..0018dc7d9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. 
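To make the README's motivating example concrete, here is a minimal sketch of decoding that JSON-shaped map into a struct. It is illustrative only; the `Person` struct is assumed, and in practice the map would come from `json.Unmarshal`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// Person mirrors the {"type": ..., "name": ...} document from the README.
type Person struct {
	Type string
	Name string
}

func main() {
	// In practice this map would come from json.Unmarshal into a
	// map[string]interface{}; it is written out literally here.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		log.Fatal(err)
	}
	// Field matching is case-insensitive, so "name" maps to Name.
	fmt.Printf("%+v\n", p)
}
```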
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..1f0abc65a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,217 @@ +package mapstructure + +import ( + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. 
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod new file mode 100644 index 000000000..d2a712562 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/mapstructure diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..256ee63fb --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1149 @@ +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. 
+ DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. 
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = DecodeHookExec( + d.config.DecodeHook, + inputVal.Type(), outVal.Type(), input) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + return d.decode(name, data, val.Elem()) + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + 
val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := 
reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + tagParts := strings.Split(tagValue, ",") + + // Determine the name of the key in the map + keyName := f.Name + if tagParts[0] != "" { + if tagParts[0] == "-" { + continue + } + keyName = tagParts[0] + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + if squash && v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + err := d.decode(keyName, x.Interface(), vMap) + if err != nil { + return err + } + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. 
+ default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // If the input value is empty, then don't allocate since non-nil != nil + if dataVal.Len() == 0 { + return nil + } + + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. 
+ if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + m := make(map[string]interface{}) + mval := reflect.Indirect(reflect.ValueOf(&m)) + if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, mval, val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. 
+ continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
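The `DecoderConfig` options documented in the vendored `mapstructure.go` above (a `DecodeHook`, `WeaklyTypedInput`, and `Metadata` tracking) compose as in the sketch below. This is an editorial example rather than vendored code; the `Config` struct, its fields, and the input map are assumptions made up for illustration.

```go
// Sketch of wiring up a mapstructure.Decoder with a string-to-duration decode
// hook, weak typing, and metadata collection. Config and the input map are
// illustrative assumptions, not part of the vendored library.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Name    string        `mapstructure:"name"`
	Retries int           `mapstructure:"retries"`
	Timeout time.Duration `mapstructure:"timeout"`
}

func main() {
	input := map[string]interface{}{
		"name":    "example",
		"retries": "3",    // string -> int via WeaklyTypedInput
		"timeout": "30s",  // string -> time.Duration via the decode hook
		"extra":   "asdf", // no matching field; ends up in Metadata.Unused
	}

	var (
		cfg  Config
		meta mapstructure.Metadata
	)
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Metadata:         &meta,
		Result:           &cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(input); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("config: %+v\n", cfg)
	fmt.Printf("decoded keys: %v, unused keys: %v\n", meta.Keys, meta.Unused)
}
```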
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 000000000..ac82cd2e1 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/go.mod b/vendor/github.com/mitchellh/reflectwalk/go.mod new file mode 100644 index 000000000..52bb7c469 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/reflectwalk diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 000000000..6a7f17611 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 000000000..70760cf4c --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 000000000..3a93a0b11 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,402 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. 
+type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. 
+ switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. + originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); 
ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/oklog/run/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/oklog/run/.travis.yml b/vendor/github.com/oklog/run/.travis.yml new file mode 100644 index 000000000..362bdd41c --- /dev/null +++ b/vendor/github.com/oklog/run/.travis.yml @@ -0,0 +1,12 @@ +language: go +sudo: false +go: + - 1.x + - tip +install: + - go get -v github.com/golang/lint/golint + - go build ./... +script: + - go vet ./... + - $HOME/gopath/bin/golint . + - go test -v -race ./... diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/oklog/run/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/oklog/run/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md new file mode 100644 index 000000000..a7228cd9a --- /dev/null +++ b/vendor/github.com/oklog/run/README.md @@ -0,0 +1,73 @@ +# run + +[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) +[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run) +[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) +[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) + +run.Group is a universal mechanism to manage goroutine lifecycles. + +Create a zero-value run.Group, and then add actors to it. 
Actors are defined as +a pair of functions: an **execute** function, which should run synchronously; +and an **interrupt** function, which, when invoked, should cause the execute +function to return. Finally, invoke Run, which blocks until the first actor +returns. This general-purpose API allows callers to model pretty much any +runnable task, and achieve well-defined lifecycle semantics for the group. + +run.Group was written to manage component lifecycles in func main for +[OK Log](https://github.com/oklog/oklog). +But it's useful in any circumstance where you need to orchestrate multiple +goroutines as a unit whole. +[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a +video of a talk where run.Group is described. + +## Examples + +### context.Context + +```go +ctx, cancel := context.WithCancel(context.Background()) +g.Add(func() error { + return myProcess(ctx, ...) +}, func(error) { + cancel() +}) +``` + +### net.Listener + +```go +ln, _ := net.Listen("tcp", ":8080") +g.Add(func() error { + return http.Serve(ln, nil) +}, func(error) { + ln.Close() +}) +``` + +### io.ReadCloser + +```go +var conn io.ReadCloser = ... +g.Add(func() error { + s := bufio.NewScanner(conn) + for s.Scan() { + println(s.Text()) + } + return s.Err() +}, func(error) { + conn.Close() +}) +``` + +## Comparisons + +Package run is somewhat similar to package +[errgroup](https://godoc.org/golang.org/x/sync/errgroup), +except it doesn't require actor goroutines to understand context semantics. + +It's somewhat similar to package +[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or +[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), +except it has a much smaller API surface, delegating e.g. staged shutdown of +goroutines to the caller. diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go new file mode 100644 index 000000000..832d47dd1 --- /dev/null +++ b/vendor/github.com/oklog/run/group.go @@ -0,0 +1,62 @@ +// Package run implements an actor-runner with deterministic teardown. It is +// somewhat similar to package errgroup, except it does not require actor +// goroutines to understand context semantics. This makes it suitable for use in +// more circumstances; for example, goroutines which are handling connections +// from net.Listeners, or scanning input from a closable io.Reader. +package run + +// Group collects actors (functions) and runs them concurrently. +// When one actor (function) returns, all actors are interrupted. +// The zero value of a Group is useful. +type Group struct { + actors []actor +} + +// Add an actor (function) to the group. Each actor must be pre-emptable by an +// interrupt function. That is, if interrupt is invoked, execute should return. +// Also, it must be safe to call interrupt even after execute has returned. +// +// The first actor (function) to return interrupts all running actors. +// The error is passed to the interrupt functions, and is returned by Run. +func (g *Group) Add(execute func() error, interrupt func(error)) { + g.actors = append(g.actors, actor{execute, interrupt}) +} + +// Run all actors (functions) concurrently. +// When the first actor returns, all others are interrupted. +// Run only returns when all actors have exited. +// Run returns the error returned by the first exiting actor. +func (g *Group) Run() error { + if len(g.actors) == 0 { + return nil + } + + // Run each actor. 
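+	// The error channel is buffered with capacity len(g.actors) so that every
+	// actor goroutine can send its result without blocking, even though Run
+	// only consumes the first error before interrupting the rest; the remaining
+	// results are drained by the final wait loop below.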
+ errors := make(chan error, len(g.actors)) + for _, a := range g.actors { + go func(a actor) { + errors <- a.execute() + }(a) + } + + // Wait for the first actor to stop. + err := <-errors + + // Signal all actors to stop. + for _, a := range g.actors { + a.interrupt(err) + } + + // Wait for all actors to stop. + for i := 1; i < cap(errors); i++ { + <-errors + } + + // Return the original error. + return err +} + +type actor struct { + execute func() error + interrupt func(error) +} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 000000000..c67dad612 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 000000000..003e99fad --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. 
The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
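+//
+// A minimal usage sketch (an assumed example, not part of the upstream
+// documentation), using the SplitLines and GetUnifiedDiffString helpers
+// defined in this package:
+//
+//	diff := UnifiedDiff{
+//		A:        SplitLines("one\ntwo\nthree\n"),
+//		B:        SplitLines("one\n2\nthree\n"),
+//		FromFile: "before.txt",
+//		ToFile:   "after.txt",
+//		Context:  3,
+//	}
+//	text, err := GetUnifiedDiffString(diff)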
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. 
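+//
+// A minimal usage sketch (an assumed example, not part of the upstream
+// documentation):
+//
+//	d := ContextDiff{
+//		A:       SplitLines("one\ntwo\n"),
+//		B:       SplitLines("one\n2\n"),
+//		Context: 3,
+//	}
+//	text, err := GetContextDiffString(d)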
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + wf := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + } + + first, last := g[0], g[len(g)-1] + ws("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + wf("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + wf("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + ws(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore new file mode 100644 index 000000000..293955f99 --- /dev/null +++ b/vendor/github.com/posener/complete/.gitignore @@ -0,0 +1,4 @@ +.idea +coverage.txt +gocomplete/gocomplete +example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml new file mode 100644 index 000000000..2fae94541 --- /dev/null +++ b/vendor/github.com/posener/complete/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.11 + - 1.10.x + - 1.9 + - 1.8 + +before_install: + - go get -u -t ./... 
+ +script: + - GO111MODULE=on ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 000000000..16249b4a1 --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 000000000..17ab2c6d6 --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,111 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. +func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. 
+// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. +func splitFields(line string) []string { + parts := strings.Fields(line) + + // Add empty field if the last field was completed. + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + + // Treat the last field if it is of the form "a=b" + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) +} + +func (a Args) from(i int) Args { + if i > len(a.All) { + i = len(a.All) + } + a.All = a.All[i:] + + if i > len(a.Completed) { + i = len(a.Completed) + } + a.Completed = a.Completed[i:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) string { + if len(args) == 0 { + return "" + } + return args[len(args)-1] +} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 000000000..b99fe5290 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd used for command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI for command line +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// this is used when the complete is not completing words, but to +// install it or uninstall it. +func (f *CLI) Run() bool { + err := f.validate() + if err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } + + switch { + case f.install: + f.prompt() + err = install.Install(f.Name) + case f.uninstall: + f.prompt() + err = install.Uninstall(f.Name) + default: + // non of the action flags matched, + // returning false should make the real program execute + return false + } + + if err != nil { + fmt.Printf("%s failed! %s\n", f.action(), err) + os.Exit(3) + } + fmt.Println("Done!") + return true +} + +// prompt use for approval +// exit if approval was not given +func (f *CLI) prompt() { + defer fmt.Println(f.action() + "ing...") + if f.yes { + return + } + fmt.Printf("%s completion for %s? ", f.action(), f.Name) + var answer string + fmt.Scanln(&answer) + + switch strings.ToLower(answer) { + case "y", "yes": + return + default: + fmt.Println("Cancelling...") + os.Exit(1) + } +} + +// AddFlags adds the CLI flags to the flag set. +// If flags is nil, the default command line flags will be taken. +// Pass non-empty strings as installName and uninstallName to override the default +// flag names. 
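+//
+// Typical wiring (an assumed sketch with a hypothetical command name, not
+// part of the upstream documentation):
+//
+//	cli := &CLI{Name: "mycmd"}
+//	cli.AddFlags(nil) // registers -install, -uninstall and -y on flag.CommandLine
+//	flag.Parse()
+//	cli.Run()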
+func (f *CLI) AddFlags(flags *flag.FlagSet) { + if flags == nil { + flags = flag.CommandLine + } + + if f.InstallName == "" { + f.InstallName = defaultInstallName + } + if f.UninstallName == "" { + f.UninstallName = defaultUninstallName + } + + if flags.Lookup(f.InstallName) == nil { + flags.BoolVar(&f.install, f.InstallName, false, + fmt.Sprintf("Install completion for %s command", f.Name)) + } + if flags.Lookup(f.UninstallName) == nil { + flags.BoolVar(&f.uninstall, f.UninstallName, false, + fmt.Sprintf("Uninstall completion for %s command", f.Name)) + } + if flags.Lookup("y") == nil { + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") + } +} + +// validate the CLI +func (f *CLI) validate() error { + if f.install && f.uninstall { + return errors.New("Install and uninstall are mutually exclusive") + } + return nil +} + +// action name according to the CLI values. +func (f *CLI) action() string { + switch { + case f.install: + return "Install" + case f.uninstall: + return "Uninstall" + default: + return "unknown" + } +} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go new file mode 100644 index 000000000..a287f9986 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/bash.go @@ -0,0 +1,32 @@ +package install + +import "fmt" + +// (un)install in bash +// basically adds/remove from .bashrc: +// +// complete -C +type bash struct { + rc string +} + +func (b bash) Install(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if lineInFile(b.rc, completeCmd) { + return fmt.Errorf("already installed in %s", b.rc) + } + return appendToFile(b.rc, completeCmd) +} + +func (b bash) Uninstall(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if !lineInFile(b.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", b.rc) + } + + return removeFromFile(b.rc, completeCmd) +} + +func (bash) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go new file mode 100644 index 000000000..6467196bc --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/fish.go @@ -0,0 +1,56 @@ +package install + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "text/template" +) + +// (un)install in fish + +type fish struct { + configDir string +} + +func (f fish) Install(cmd, bin string) error { + completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) + completeCmd, err := f.cmd(cmd, bin) + if err != nil { + return err + } + if _, err := os.Stat(completionFile); err == nil { + return fmt.Errorf("already installed at %s", completionFile) + } + + return createFile(completionFile, completeCmd) +} + +func (f fish) Uninstall(cmd, bin string) error { + completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) + if _, err := os.Stat(completionFile); err != nil { + return fmt.Errorf("does not installed in %s", f.configDir) + } + + return os.Remove(completionFile) +} + +func (f fish) cmd(cmd, bin string) (string, error) { + var buf bytes.Buffer + params := struct{ Cmd, Bin string }{cmd, bin} + tmpl := template.Must(template.New("cmd").Parse(` +function __complete_{{.Cmd}} + set -lx COMP_LINE (string join ' ' (commandline -o)) + test (commandline -ct) = "" + and set COMP_LINE "$COMP_LINE " + {{.Bin}} +end +complete -c {{.Cmd}} -a 
"(__complete_{{.Cmd}})" +`)) + err := tmpl.Execute(&buf, params) + if err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go new file mode 100644 index 000000000..dfa1963b8 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,119 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to uninstall") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f != "" { + i = append(i, zsh{f}) + } + if d := fishConfigDir(); d != "" { + i = append(i, fish{d}) + } + return +} + +func fishConfigDir() string { + configDir := filepath.Join(getConfigHomePath(), "fish") + if configDir == "" { + return "" + } + if info, err := os.Stat(configDir); err != nil || !info.IsDir() { + return "" + } + return configDir +} + +func getConfigHomePath() string { + u, err := user.Current() + if err != nil { + return "" + } + + configHome := os.Getenv("XDG_CONFIG_HOME") + if configHome == "" { + return filepath.Join(u.HomeDir, ".config") + } + return configHome +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 000000000..d34ac8cae --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,140 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + r := bufio.NewReader(f) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
+ if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func createFile(name string, content string) error { + // make sure file directory exists + if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { + return err + } + + // create the file + f, err := os.Create(name) + if err != nil { + return err + } + defer f.Close() + + // write file content + _, err = f.WriteString(fmt.Sprintf("%s\n", content)) + return err +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) + str := string(line) + if str == content { + continue + } + _, err = wf.WriteString(str + "\n") + if err != nil { + return "", err + } + prefix = prefix[:0] + } + return wf.Name(), nil +} + +func copyFile(src string, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go new file mode 100644 index 000000000..a625f53cf --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/zsh.go @@ -0,0 +1,39 @@ +package install + +import "fmt" + +// (un)install in zsh +// basically adds/remove from .zshrc: +// +// autoload -U +X bashcompinit && bashcompinit" +// complete -C +type zsh struct { + rc string +} + +func (z zsh) Install(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if lineInFile(z.rc, completeCmd) { + return fmt.Errorf("already installed in %s", z.rc) + } + + bashCompInit := "autoload -U +X bashcompinit && bashcompinit" + if !lineInFile(z.rc, bashCompInit) { + completeCmd = bashCompInit + "\n" + completeCmd + } + + return appendToFile(z.rc, completeCmd) +} + +func (z zsh) Uninstall(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if !lineInFile(z.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", z.rc) + } + + return removeFromFile(z.rc, completeCmd) +} + +func (zsh) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go new file mode 100644 index 000000000..82d37d529 --- /dev/null +++ b/vendor/github.com/posener/complete/command.go @@ -0,0 +1,111 @@ +package complete + +// Command represents a command line +// It holds the data that enables auto completion of command 
line +// Command can also be a sub command. +type Command struct { + // Sub is map of sub commands of the current command + // The key refer to the sub command name, and the value is it's + // Command descriptive struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is it's predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags that can appear also after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those who are + // given without any flag before. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) []string { + options, _ := c.predict(a) + return options +} + +// Commands is the type of Sub member, it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + prediction = append(prediction, sub) + } + return +} + +// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. +type Flags map[string]Predictor + +// Predict completion of flags names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + // If the flag starts with a hyphen, we avoid emitting the prediction + // unless the last typed arg contains a hyphen as well. + flagHyphenStart := len(flag) != 0 && flag[0] == '-' + lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' + if flagHyphenStart && !lastHyphenStart { + continue + } + prediction = append(prediction, flag) + } + return +} + +// predict options +// only is set to true if no more options are allowed to be returned +// those are in cases of special flag that has specific completion arguments, +// and other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + + // We matched so stop searching. Continuing to search can accidentally + // match a subcommand with current set of commands, see issue #46. + break + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) 
+ } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 000000000..725c4debc --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,109 @@ +// Package complete provides a tool for bash writing bash completion in go. +// +// Writing bash completion scripts is a hard work. This package provides an easy way +// to create bash completion scripts for any command, and also an easy way to install/uninstall +// the completion of the command. +package complete + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + + "github.com/posener/complete/cmd" + "github.com/posener/complete/match" +) + +const ( + envLine = "COMP_LINE" + envPoint = "COMP_POINT" + envDebug = "COMP_DEBUG" +) + +// Complete structs define completion for a command with CLI options +type Complete struct { + Command Command + cmd.CLI + Out io.Writer +} + +// New creates a new complete command. +// name is the name of command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. +func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + Out: os.Stdout, + } +} + +// Run runs the completion and add installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from completion line in environment variable, +// and print out the complete options. +// returns success if the completion ran or if the cli matched +// any of the given flags, false otherwise +// For installation: it assumes that flags were added and parsed before +// it was called. +func (c *Complete) Complete() bool { + line, point, ok := getEnv() + if !ok { + // make sure flags parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + + if point >= 0 && point < len(line) { + line = line[:point] + } + + Log("Completing phrase: %s", line) + a := newArgs(line) + Log("Completing last field: %s", a.Last) + options := c.Command.Predict(a) + Log("Options: %s", options) + + // filter only options that match the last argument + matches := []string{} + for _, option := range options { + if match.Prefix(option, a.Last) { + matches = append(matches, option) + } + } + Log("Matches: %s", matches) + c.output(matches) + return true +} + +func getEnv() (line string, point int, ok bool) { + line = os.Getenv(envLine) + if line == "" { + return + } + point, err := strconv.Atoi(os.Getenv(envPoint)) + if err != nil { + // If failed parsing point for some reason, set it to point + // on the end of the line. 
+ Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) + point = len(line) + } + return line, point, true +} + +func (c *Complete) output(options []string) { + // stdout of program defines the complete options + for _, option := range options { + fmt.Fprintln(c.Out, option) + } +} diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod new file mode 100644 index 000000000..fef0c440d --- /dev/null +++ b/vendor/github.com/posener/complete/go.mod @@ -0,0 +1,3 @@ +module github.com/posener/complete + +require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum new file mode 100644 index 000000000..d2f13301d --- /dev/null +++ b/vendor/github.com/posener/complete/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 000000000..c3029556e --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,22 @@ +package complete + +import ( + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes +// since complete is running on tab completion, it is nice to +// have logs to the stderr (when writing your own completer) +// to write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program +var Log = getLogger() + +func getLogger() func(format string, args ...interface{}) { + var logfile = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr + } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go new file mode 100644 index 000000000..051171e8a --- /dev/null +++ b/vendor/github.com/posener/complete/match/file.go @@ -0,0 +1,19 @@ +package match + +import "strings" + +// File returns true if prefix can match the file +func File(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go new file mode 100644 index 000000000..812fcac96 --- /dev/null +++ b/vendor/github.com/posener/complete/match/match.go @@ -0,0 +1,6 @@ +package match + +// Match matches two strings +// it is used for comparing a term to the last typed +// word, the prefix, and see if it is a possible auto complete option. 
+type Match func(term, prefix string) bool
diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go
new file mode 100644
index 000000000..9a01ba63a
--- /dev/null
+++ b/vendor/github.com/posener/complete/match/prefix.go
@@ -0,0 +1,9 @@
+package match
+
+import "strings"
+
+// Prefix is a simple Matcher: if the term starts with the prefix, there is a match.
+// Prefix returns true if long has prefix as its prefix.
+func Prefix(long, prefix string) bool {
+	return strings.HasPrefix(long, prefix)
+}
diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go
new file mode 100644
index 000000000..820706325
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict.go
@@ -0,0 +1,41 @@
+package complete
+
+// Predictor implements a predict method, in which, given
+// command line arguments, it returns a list of options it predicts.
+type Predictor interface {
+	Predict(Args) []string
+}
+
+// PredictOr unions multiple predictors, so that the resulting predictor
+// returns the union of their predictions.
+func PredictOr(predictors ...Predictor) Predictor {
+	return PredictFunc(func(a Args) (prediction []string) {
+		for _, p := range predictors {
+			if p == nil {
+				continue
+			}
+			prediction = append(prediction, p.Predict(a)...)
+		}
+		return
+	})
+}
+
+// PredictFunc determines what terms can follow a command or a flag.
+// It is used for auto completion: given the last word already typed on
+// the command line, it returns the words that can complete it.
+type PredictFunc func(Args) []string
+
+// Predict invokes the predict function and implements the Predictor interface
+func (p PredictFunc) Predict(a Args) []string {
+	if p == nil {
+		return nil
+	}
+	return p(a)
+}
+
+// PredictNothing expects nothing to follow it.
+var PredictNothing Predictor
+
+// PredictAnything expects something, but nothing particular, such as a number
+// or an arbitrary name.
+var PredictAnything = PredictFunc(func(Args) []string { return nil })
diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go
new file mode 100644
index 000000000..c8adf7e80
--- /dev/null
+++ b/vendor/github.com/posener/complete/predict_files.go
@@ -0,0 +1,108 @@
+package complete
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/posener/complete/match"
+)
+
+// PredictDirs searches for directories in the path that has started to be
+// typed; if no path has been typed yet, it completes to directories in the
+// current working directory.
+func PredictDirs(pattern string) Predictor {
+	return files(pattern, false)
+}
+
+// PredictFiles searches for files matching the given pattern in the path that
+// has started to be typed; if no path has been typed yet, it completes to files
+// that match the pattern in the current working directory.
+// To match any file, use "*" as the pattern. To match go files, use "*.go", and so on.
+func PredictFiles(pattern string) Predictor {
+	return files(pattern, true)
+}
+
+func files(pattern string, allowFiles bool) PredictFunc {
+
+	// search for files according to the arguments;
+	// if only one directory matched the result, search recursively into
+	// this directory to give more results.
+	return func(a Args) (prediction []string) {
+		prediction = predictFiles(a, pattern, allowFiles)
+
+		// if the number of predictions is not 1, we either have many results or
+		// no results, so we return them as-is.
+ if len(prediction) != 1 { + return + } + + // only try deeper, if the one item is a directory + if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { + return + } + + a.Last = prediction[0] + return predictFiles(a, pattern, allowFiles) + } +} + +func predictFiles(a Args, pattern string, allowFiles bool) []string { + if strings.HasSuffix(a.Last, "/..") { + return nil + } + + dir := a.Directory() + files := listFiles(dir, pattern, allowFiles) + + // add dir if match + files = append(files, dir) + + return PredictFilesSet(files).Predict(a) +} + +// PredictFilesSet predict according to file rules to a given set of file names +func PredictFilesSet(files []string) PredictFunc { + return func(a Args) (prediction []string) { + // add all matching files to prediction + for _, f := range files { + f = fixPathForm(a.Last, f) + + // test matching of file to the argument + if match.File(f, a.Last) { + prediction = append(prediction, f) + } + } + return + } +} + +func listFiles(dir, pattern string, allowFiles bool) []string { + // set of all file names + m := map[string]bool{} + + // list files + if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { + for _, f := range files { + if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { + m[f] = true + } + } + } + + // list directories + if dirs, err := ioutil.ReadDir(dir); err == nil { + for _, d := range dirs { + if d.IsDir() { + m[filepath.Join(dir, d.Name())] = true + } + } + } + + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + return list +} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go new file mode 100644 index 000000000..fa4a34ae4 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_set.go @@ -0,0 +1,12 @@ +package complete + +// PredictSet expects specific set of terms, given in the options argument. +func PredictSet(options ...string) Predictor { + return predictSet(options) +} + +type predictSet []string + +func (p predictSet) Predict(a Args) []string { + return p +} diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md new file mode 100644 index 000000000..6d757ef82 --- /dev/null +++ b/vendor/github.com/posener/complete/readme.md @@ -0,0 +1,118 @@ +# complete + +A tool for bash writing bash completion in go, and bash completion for the go command line. + +[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete) +[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete) +[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete) +[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete) +[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete) + +Writing bash completion scripts is a hard work. This package provides an easy way +to create bash completion scripts for any command, and also an easy way to install/uninstall +the completion of the command. + +## go command bash completion + +In [gocomplete](./gocomplete) there is an example for bash completion for the `go` command line. 
+ +This is an example that uses the `complete` package on the `go` command - the `complete` package +can also be used to implement any completions, see [Usage](#usage). + +### Install + +1. Type in your shell: +``` +go get -u github.com/posener/complete/gocomplete +gocomplete -install +``` + +2. Restart your shell + +Uninstall by `gocomplete -uninstall` + +### Features + +- Complete `go` command, including sub commands and all flags. +- Complete packages names or `.go` files when necessary. +- Complete test names after `-run` flag. + +## complete package + +Supported shells: + +- [x] bash +- [x] zsh +- [x] fish + +### Usage + +Assuming you have program called `run` and you want to have bash completion +for it, meaning, if you type `run` then space, then press the `Tab` key, +the shell will suggest relevant complete options. + +In that case, we will create a program called `runcomplete`, a go program, +with a `func main()` and so, that will make the completion of the `run` +program. Once the `runcomplete` will be in a binary form, we could +`runcomplete -install` and that will add to our shell all the bash completion +options for `run`. + +So here it is: + +```go +import "github.com/posener/complete" + +func main() { + + // create a Command object, that represents the command we want + // to complete. + run := complete.Command{ + + // Sub defines a list of sub commands of the program, + // this is recursive, since every command is of type command also. + Sub: complete.Commands{ + + // add a build sub command + "build": complete.Command { + + // define flags of the build sub command + Flags: complete.Flags{ + // build sub command has a flag '-cpus', which + // expects number of cpus after it. in that case + // anything could complete this flag. + "-cpus": complete.PredictAnything, + }, + }, + }, + + // define flags of the 'run' main command + Flags: complete.Flags{ + // a flag -o, which expects a file ending with .out after + // it, the tab completion will auto complete for files matching + // the given pattern. + "-o": complete.PredictFiles("*.out"), + }, + + // define global flags of the 'run' main command + // those will show up also when a sub command was entered in the + // command line + GlobalFlags: complete.Flags{ + + // a flag '-h' which does not expects anything after it + "-h": complete.PredictNothing, + }, + } + + // run the command completion, as part of the main() function. + // this triggers the autocompletion when needed. + // name must be exactly as the binary that we want to complete. + complete.New("run", run).Run() +} +``` + +### Self completing program + +In case that the program that we want to complete is written in go we +can make it self completing. + +Here is an [example](./example/self/main.go) diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh new file mode 100644 index 000000000..56bfcf15d --- /dev/null +++ b/vendor/github.com/posener/complete/test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -v -race -coverprofile=profile.out -covermode=atomic $d + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done \ No newline at end of file diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go new file mode 100644 index 000000000..58b8b7927 --- /dev/null +++ b/vendor/github.com/posener/complete/utils.go @@ -0,0 +1,46 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" +) + +// fixPathForm changes a file name to a relative name +func fixPathForm(last string, file string) string { + // get wording directory for relative name + workDir, err := os.Getwd() + if err != nil { + return file + } + + abs, err := filepath.Abs(file) + if err != nil { + return file + } + + // if last is absolute, return path as absolute + if filepath.IsAbs(last) { + return fixDirPath(abs) + } + + rel, err := filepath.Rel(workDir, abs) + if err != nil { + return file + } + + // fix ./ prefix of path + if rel != "." && strings.HasPrefix(last, ".") { + rel = "./" + rel + } + + return fixDirPath(rel) +} + +func fixDirPath(path string) string { + info, err := os.Stat(path) + if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") { + path += "/" + } + return path +} diff --git a/vendor/github.com/r3labs/diff/.gitignore b/vendor/github.com/r3labs/diff/.gitignore new file mode 100644 index 000000000..a1338d685 --- /dev/null +++ b/vendor/github.com/r3labs/diff/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/r3labs/diff/CONTRIBUTING.md b/vendor/github.com/r3labs/diff/CONTRIBUTING.md new file mode 100644 index 000000000..3c00b27c5 --- /dev/null +++ b/vendor/github.com/r3labs/diff/CONTRIBUTING.md @@ -0,0 +1,80 @@ +# Contributing guidelines + +Looking to contribute something to this project? Here's how you can help: + +Please take a moment to review this document in order to make the contribution process easy and effective for everyone involved. + +Following these guidelines helps to communicate that you respect the time of the developers managing and developing this open source project. In return, they should reciprocate that respect in addressing your issue or assessing patches and features. + +We also have a [code of conduct](https://ernest.io/conduct). + +## Using the issue tracker + +The issue tracker is the preferred channel for [bug reports](#bug-reports), [features requests](#feature-requests) and [submitting pull requests](#pull-requests), but please respect the following restrictions: + +* Please **do not** use the issue tracker for personal support requests. + +* Please **do not** derail issues. Keep the discussion on topic and + respect the opinions of others. + + +## Bug reports + +A bug is a _demonstrable problem_ that is caused by the code in the repository. +Good bug reports are extremely helpful - thank you! + +Guidelines for bug reports: + +1. **Use the GitHub issue search** — check if the issue has already been + reported. + +2. **Check if the issue has been fixed** — try to reproduce it using the + latest `master` or `develop` branch in the repository. + +3. **Isolate the problem** — create a reduced test case and a live example. 
+ +A good bug report shouldn't leave others needing to chase you up for more +information. Please try to be as detailed as possible in your report. What is +your environment? What steps will reproduce the issue? Which environment experience the problem? What would you expect to be the outcome? All these +details will help people to fix any potential bugs. + +Example: + +> Short and descriptive example bug report title +> +> A summary of the issue and the environment in which it occurs. If +> suitable, include the steps required to reproduce the bug. +> +> 1. This is the first step +> 2. This is the second step +> 3. Further steps, etc. +> +> `` - a link to the reduced test case +> +> Any other information you want to share that is relevant to the issue being +> reported. This might include the lines of code that you have identified as +> causing the bug, and potential solutions (and your opinions on their +> merits). + + +## Feature requests + +Feature requests are welcome. But take a moment to find out whether your idea +fits with the scope and aims of the project. It's up to *you* to make a strong +case to convince the project's developers of the merits of this feature. Please +provide as much detail and context as possible. + + +## Pull requests + +Good pull requests - patches, improvements, new features - are a fantastic +help. They should remain focused in scope and avoid containing unrelated +commits. + +[**Please ask first**](https://ernest.io/community) before embarking on any significant pull request (e.g. +implementing features, refactoring code, porting to a different language), +otherwise you risk spending a lot of time working on something that the +project's developers might not want to merge into the project. + +Please adhere to the coding conventions used throughout a project (indentation, +accurate comments, etc.) and any other requirements (such as test coverage). diff --git a/vendor/github.com/r3labs/diff/LICENSE b/vendor/github.com/r3labs/diff/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/r3labs/diff/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. 
"Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/r3labs/diff/Makefile b/vendor/github.com/r3labs/diff/Makefile new file mode 100644 index 000000000..f16fa853a --- /dev/null +++ b/vendor/github.com/r3labs/diff/Makefile @@ -0,0 +1,16 @@ +install: + go install -v ${LDFLAGS} + +test: + @go test -v -cover ./... + +cover: + @go test -coverprofile cover.out + +dev-deps: + @go get -u github.com/stretchr/testify + @go get github.com/alecthomas/gometalinter + @gometalinter --install > /dev/null + +lint: + @gometalinter --vendor --disable-all --enable=errcheck --enable=golint --enable=vet ./... diff --git a/vendor/github.com/r3labs/diff/README.md b/vendor/github.com/r3labs/diff/README.md new file mode 100644 index 000000000..02485364b --- /dev/null +++ b/vendor/github.com/r3labs/diff/README.md @@ -0,0 +1,180 @@ +# diff + +A library for diffing golang structures and values. + +Utilizing field tags and reflection, it is able to compare two structures of the same type and create a changelog of all modified values. The produced changelog can easily be serialized to json. + +## Build status + +* Master [![CircleCI](https://circleci.com/gh/r3labs/diff/tree/master.svg?style=svg)](https://circleci.com/gh/r3labs/diff/tree/master) + +## Installation + +``` +go get github.com/r3labs/diff +``` + +## Changelog Format + +When diffing two structures using `Diff`, a changelog will be produced. Any detected changes will populate the changelog array with a Change type: + +```go +type Change struct { + Type string // The type of change detected; can be one of create, update or delete + Path []string // The path of the detected change; will contain any field name or array index that was part of the traversal + From interface{} // The original value that was present in the "from" structure + To interface{} // The new value that was detected as a change in the "to" structure +} +``` + +Given the example below, we are diffing two slices where the third element has been removed: + +```go +from := []int{1, 2, 3, 4} +to := []int{1, 2, 4} + +changelog, _ := diff.Diff(from, to) +``` + +The resultant changelog should contain one change: + +```go +Change{ + Type: "delete", + Path: ["2"], + From: 3, + To: nil, +} +``` + +## Supported Types + +A diffable value can be/contain any of the following types: + +* struct +* slice +* string +* int +* bool +* map +* pointer + +### Tags + +In order for struct fields to be compared, they must be tagged with a given name. All tag values are prefixed with `diff`. i.e. `diff:"items"`. + +* `-` : In the event that you want to exclude a value from the diff, you can use the tag `diff:"-"` and the field will be ignored. 
+ +* `identifier` : If you need to compare arrays by a matching identifier and not based on order, you can specify the `identifier` tag. If an identifiable element is found in both the from and to structures, they will be directly compared. i.e. `diff:"name,identifier"` + +* `immutable` : Will omit this struct field from diffing. When using `diff.StructValues()` these values will be added to the returned changelog. It's usecase is for when we have nothing to compare a struct to and want to show all of its relevant values. + +## Usage + +### Basic Example + +Diffing a basic set of values can be accomplished using the diff functions. Any items that specify a "diff" tag using a name will be compared. + +```go +import "github.com/r3labs/diff" + +type Order struct { + ID string `diff:"id"` + Items []int `diff:"items"` +} + +func main() { + a := Order{ + ID: "1234", + Items: []int{1, 2, 3, 4}, + } + + b := Order{ + ID: "1234", + Items: []int{1, 2, 4}, + } + + changelog, err := diff.Diff(a, b) + ... +} +``` + +In this example, the output generated in the changelog will indicate that the third element with a value of '3' was removed from items. +When marshalling the changelog to json, the output will look like: + +```json +[ + { + "type": "delete", + "path": ["items", "2"], + "from": 3, + "to": null + } +] +``` + +### Options and Configuration + +You can create a new instance of a differ that allows options to be set. + +```go +import "github.com/r3labs/diff" + +type Order struct { + ID string `diff:"id"` + Items []int `diff:"items"` +} + +func main() { + a := Order{ + ID: "1234", + Items: []int{1, 2, 3, 4}, + } + + b := Order{ + ID: "1234", + Items: []int{1, 2, 4}, + } + + d, err := diff.NewDiffer(diff.SliceOrdering(true)) + if err != nil { + panic(err) + } + + changelog, err := d.Diff(a, b) + ... +} +``` + +Supported options are: + +`SliceOrdering` ensures that the ordering of items in a slice is taken into account + + +## Running Tests + +``` +make test +``` + +## Contributing + +Please read through our +[contributing guidelines](CONTRIBUTING.md). +Included are directions for opening issues, coding standards, and notes on +development. + +Moreover, if your pull request contains patches or features, you must include +relevant unit tests. + +## Versioning + +For transparency into our release cycle and in striving to maintain backward +compatibility, this project is maintained under [the Semantic Versioning guidelines](http://semver.org/). + +## Copyright and License + +Code and documentation copyright since 2015 r3labs.io authors. + +Code released under +[the Mozilla Public License Version 2.0](LICENSE). diff --git a/vendor/github.com/r3labs/diff/comparative.go b/vendor/github.com/r3labs/diff/comparative.go new file mode 100644 index 000000000..f92ff6bdd --- /dev/null +++ b/vendor/github.com/r3labs/diff/comparative.go @@ -0,0 +1,44 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" +) + +// Comparative ... 
+type Comparative struct { + A, B *reflect.Value +} + +// ComparativeList : stores indexed comparative +type ComparativeList struct { + m map[interface{}]*Comparative + keys []interface{} +} + +// NewComparativeList : returns a new comparative list +func NewComparativeList() *ComparativeList { + return &ComparativeList{ + m: make(map[interface{}]*Comparative), + keys: make([]interface{}, 0), + } +} + +func (cl *ComparativeList) addA(k interface{}, v *reflect.Value) { + if (*cl).m[k] == nil { + (*cl).m[k] = &Comparative{} + (*cl).keys = append((*cl).keys, k) + } + (*cl).m[k].A = v +} + +func (cl *ComparativeList) addB(k interface{}, v *reflect.Value) { + if (*cl).m[k] == nil { + (*cl).m[k] = &Comparative{} + (*cl).keys = append((*cl).keys, k) + } + (*cl).m[k].B = v +} diff --git a/vendor/github.com/r3labs/diff/diff.go b/vendor/github.com/r3labs/diff/diff.go new file mode 100644 index 000000000..35fe0c242 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff.go @@ -0,0 +1,264 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +var ( + // ErrTypeMismatch Compared types do not match + ErrTypeMismatch = errors.New("types do not match") + // ErrInvalidChangeType The specified change values are not unsupported + ErrInvalidChangeType = errors.New("change type must be one of 'create' or 'delete'") +) + +const ( + // CREATE represents when an element has been added + CREATE = "create" + // UPDATE represents when an element has been updated + UPDATE = "update" + // DELETE represents when an element has been removed + DELETE = "delete" +) + +// Differ a configurable diff instance +type Differ struct { + SliceOrdering bool + DisableStructValues bool + cl Changelog +} + +// Changelog stores a list of changed items +type Changelog []Change + +// Change stores information about a changed item +type Change struct { + Type string `json:"type"` + Path []string `json:"path"` + From interface{} `json:"from"` + To interface{} `json:"to"` +} + +// Changed returns true if both values differ +func Changed(a, b interface{}) bool { + cl, _ := Diff(a, b) + return len(cl) > 0 +} + +// Diff returns a changelog of all mutated values from both +func Diff(a, b interface{}) (Changelog, error) { + var d Differ + + return d.cl, d.diff([]string{}, reflect.ValueOf(a), reflect.ValueOf(b)) +} + +// NewDiffer creates a new configurable diffing object +func NewDiffer(opts ...func(d *Differ) error) (*Differ, error) { + var d Differ + + for _, opt := range opts { + err := opt(&d) + if err != nil { + return nil, err + } + } + + return &d, nil +} + +// StructValues gets all values from a struct +// values are stored as "created" or "deleted" entries in the changelog, +// depending on the change type specified +func StructValues(t string, path []string, s interface{}) (Changelog, error) { + var d Differ + v := reflect.ValueOf(s) + + return d.cl, d.structValues(t, path, v) +} + +// Filter filter changes based on path. 
Paths may contain valid regexp to match items +func (cl *Changelog) Filter(path []string) Changelog { + var ncl Changelog + + for _, c := range *cl { + if pathmatch(path, c.Path) { + ncl = append(ncl, c) + } + } + + return ncl +} + +// Diff returns a changelog of all mutated values from both +func (d *Differ) Diff(a, b interface{}) (Changelog, error) { + return d.cl, d.diff([]string{}, reflect.ValueOf(a), reflect.ValueOf(b)) +} + +func (d *Differ) diff(path []string, a, b reflect.Value) error { + // check if types match or are + if invalid(a, b) { + return ErrTypeMismatch + } + + switch { + case are(a, b, reflect.Struct, reflect.Invalid): + return d.diffStruct(path, a, b) + case are(a, b, reflect.Slice, reflect.Invalid): + return d.diffSlice(path, a, b) + case are(a, b, reflect.String, reflect.Invalid): + return d.diffString(path, a, b) + case are(a, b, reflect.Bool, reflect.Invalid): + return d.diffBool(path, a, b) + case are(a, b, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Invalid): + return d.diffInt(path, a, b) + case are(a, b, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Invalid): + return d.diffUint(path, a, b) + case are(a, b, reflect.Float32, reflect.Float64, reflect.Invalid): + return d.diffFloat(path, a, b) + case are(a, b, reflect.Map, reflect.Invalid): + return d.diffMap(path, a, b) + case are(a, b, reflect.Ptr, reflect.Invalid): + return d.diffPtr(path, a, b) + case are(a, b, reflect.Interface, reflect.Invalid): + return d.diffInterface(path, a, b) + default: + return errors.New("unsupported type: " + a.Kind().String()) + } +} + +func (cl *Changelog) add(t string, path []string, from, to interface{}) { + (*cl) = append((*cl), Change{ + Type: t, + Path: path, + From: from, + To: to, + }) +} + +func tagName(f reflect.StructField) string { + t := f.Tag.Get("diff") + + parts := strings.Split(t, ",") + if len(parts) < 1 { + return "-" + } + + return parts[0] +} + +func identifier(v reflect.Value) interface{} { + for i := 0; i < v.NumField(); i++ { + if hasTagOption(v.Type().Field(i), "identifier") { + return v.Field(i).Interface() + } + } + + return nil +} + +func hasTagOption(f reflect.StructField, opt string) bool { + parts := strings.Split(f.Tag.Get("diff"), ",") + if len(parts) < 2 { + return false + } + + for _, option := range parts[1:] { + if option == opt { + return true + } + } + + return false +} + +func swapChange(t string, c Change) Change { + nc := Change{ + Type: t, + Path: c.Path, + } + + switch t { + case CREATE: + nc.To = c.To + case DELETE: + nc.From = c.To + } + + return nc +} + +func idstring(v interface{}) string { + switch v.(type) { + case string: + return v.(string) + case int: + return strconv.Itoa(v.(int)) + default: + return fmt.Sprint(v) + } +} + +func invalid(a, b reflect.Value) bool { + if a.Kind() == b.Kind() { + return false + } + + if a.Kind() == reflect.Invalid { + return false + } + if b.Kind() == reflect.Invalid { + return false + } + + return true +} + +func are(a, b reflect.Value, kinds ...reflect.Kind) bool { + var amatch, bmatch bool + + for _, k := range kinds { + if a.Kind() == k { + amatch = true + } + if b.Kind() == k { + bmatch = true + } + } + + return amatch && bmatch +} + +func areType(a, b reflect.Value, types ...reflect.Type) bool { + var amatch, bmatch bool + + for _, t := range types { + if a.Kind() != reflect.Invalid { + if a.Type() == t { + amatch = true + } + } + if b.Kind() != reflect.Invalid { + if b.Type() == t { + bmatch = true + } + } + } + + 
return amatch && bmatch +} + +func copyAppend(src []string, elems ...string) []string { + dst := make([]string, len(src)+len(elems)) + copy(dst, src) + for i := len(src); i < len(src)+len(elems); i++ { + dst[i] = elems[i-len(src)] + } + return dst +} diff --git a/vendor/github.com/r3labs/diff/diff_bool.go b/vendor/github.com/r3labs/diff/diff_bool.go new file mode 100644 index 000000000..86c3cb441 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_bool.go @@ -0,0 +1,29 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import "reflect" + +func (d *Differ) diffBool(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.Bool() != b.Bool() { + d.cl.add(UPDATE, path, a.Interface(), b.Interface()) + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_comparative.go b/vendor/github.com/r3labs/diff/diff_comparative.go new file mode 100644 index 000000000..83cad4118 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_comparative.go @@ -0,0 +1,58 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffComparative(path []string, c *ComparativeList) error { + for _, k := range c.keys { + fpath := copyAppend(path, idstring(k)) + + nv := reflect.ValueOf(nil) + + if c.m[k].A == nil { + c.m[k].A = &nv + } + + if c.m[k].B == nil { + c.m[k].B = &nv + } + + err := d.diff(fpath, *c.m[k].A, *c.m[k].B) + if err != nil { + return err + } + } + + return nil +} + +func comparative(a, b reflect.Value) bool { + if a.Len() > 0 { + ae := a.Index(0) + ak := getFinalValue(ae) + + if ak.Kind() == reflect.Struct { + if identifier(ak) != nil { + return true + } + } + } + + if b.Len() > 0 { + be := b.Index(0) + bk := getFinalValue(be) + + if bk.Kind() == reflect.Struct { + if identifier(bk) != nil { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/r3labs/diff/diff_float.go b/vendor/github.com/r3labs/diff/diff_float.go new file mode 100644 index 000000000..4a663ae93 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_float.go @@ -0,0 +1,35 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffFloat(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.Float() != b.Float() { + if a.CanInterface() { + d.cl.add(UPDATE, path, a.Interface(), b.Interface()) + } else { + d.cl.add(UPDATE, path, a.Float(), b.Float()) + } + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_int.go b/vendor/github.com/r3labs/diff/diff_int.go new file mode 100644 index 000000000..e50ddf9cb --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_int.go @@ -0,0 +1,35 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffInt(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.Int() != b.Int() { + if a.CanInterface() { + d.cl.add(UPDATE, path, a.Interface(), b.Interface()) + } else { + d.cl.add(UPDATE, path, a.Int(), b.Int()) + } + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_interface.go b/vendor/github.com/r3labs/diff/diff_interface.go new file mode 100644 index 000000000..5ddedf8c7 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_interface.go @@ -0,0 +1,39 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import "reflect" + +func (d *Differ) diffInterface(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.IsNil() && b.IsNil() { + return nil + } + + if a.IsNil() { + d.cl.add(UPDATE, path, nil, b.Interface()) + return nil + } + + if b.IsNil() { + d.cl.add(UPDATE, path, a.Interface(), nil) + return nil + } + + return d.diff(path, a.Elem(), b.Elem()) +} diff --git a/vendor/github.com/r3labs/diff/diff_map.go b/vendor/github.com/r3labs/diff/diff_map.go new file mode 100644 index 000000000..815cbeb12 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_map.go @@ -0,0 +1,69 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +package diff + +import ( + "fmt" + "reflect" +) + +func (d *Differ) diffMap(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + return d.mapValues(CREATE, path, b) + } + + if b.Kind() == reflect.Invalid { + return d.mapValues(DELETE, path, a) + } + + c := NewComparativeList() + + for _, k := range a.MapKeys() { + ae := a.MapIndex(k) + c.addA(k.Interface(), &ae) + } + + for _, k := range b.MapKeys() { + be := b.MapIndex(k) + c.addB(k.Interface(), &be) + } + + return d.diffComparative(path, c) +} + +func (d *Differ) mapValues(t string, path []string, a reflect.Value) error { + if t != CREATE && t != DELETE { + return ErrInvalidChangeType + } + + if a.Kind() == reflect.Ptr { + a = reflect.Indirect(a) + } + + if a.Kind() != reflect.Map { + return ErrTypeMismatch + } + + x := reflect.New(a.Type()).Elem() + + for _, k := range a.MapKeys() { + ae := a.MapIndex(k) + xe := x.MapIndex(k) + + err := d.diff(append(path, fmt.Sprint(k.Interface())), xe, ae) + if err != nil { + return err + } + } + + for i := 0; i < len(d.cl); i++ { + // only swap changes on the relevant map + if pathmatch(path, d.cl[i].Path) { + d.cl[i] = swapChange(t, d.cl[i]) + } + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_pointer.go b/vendor/github.com/r3labs/diff/diff_pointer.go new file mode 100644 index 000000000..342f78294 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_pointer.go @@ -0,0 +1,43 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffPtr(path []string, a, b reflect.Value) error { + if a.Kind() != b.Kind() { + if a.Kind() == reflect.Invalid { + if !b.IsNil() { + return d.diff(path, reflect.ValueOf(nil), reflect.Indirect(b)) + } + } + + if b.Kind() == reflect.Invalid { + if !a.IsNil() { + return d.diff(path, reflect.Indirect(a), reflect.ValueOf(nil)) + } + } + + return ErrTypeMismatch + } + + if a.IsNil() && b.IsNil() { + return nil + } + + if a.IsNil() { + d.cl.add(UPDATE, path, nil, b.Interface()) + return nil + } + + if b.IsNil() { + d.cl.add(UPDATE, path, a.Interface(), nil) + return nil + } + + return d.diff(path, reflect.Indirect(a), reflect.Indirect(b)) +} diff --git a/vendor/github.com/r3labs/diff/diff_slice.go b/vendor/github.com/r3labs/diff/diff_slice.go new file mode 100644 index 000000000..04d07cc21 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_slice.go @@ -0,0 +1,121 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffSlice(path []string, a, b reflect.Value) error { + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if comparative(a, b) { + return d.diffSliceComparative(path, a, b) + } + + return d.diffSliceGeneric(path, a, b) +} + +func (d *Differ) diffSliceGeneric(path []string, a, b reflect.Value) error { + missing := NewComparativeList() + + slice := sliceTracker{} + for i := 0; i < a.Len(); i++ { + ae := a.Index(i) + + if (d.SliceOrdering && !hasAtSameIndex(b, ae, i)) || (!d.SliceOrdering && !slice.has(b, ae)) { + missing.addA(i, &ae) + } + } + + slice = sliceTracker{} + for i := 0; i < b.Len(); i++ { + be := b.Index(i) + + if (d.SliceOrdering && !hasAtSameIndex(a, be, i)) || (!d.SliceOrdering && !slice.has(a, be)) { + missing.addB(i, &be) + } + } + + // fallback to comparing based on order in slice if item is missing + if len(missing.keys) == 0 { + return nil + } + + return d.diffComparative(path, missing) +} + +func (d *Differ) diffSliceComparative(path []string, a, b reflect.Value) error { + c := NewComparativeList() + + for i := 0; i < a.Len(); i++ { + ae := a.Index(i) + ak := getFinalValue(ae) + + id := identifier(ak) + if id != nil { + c.addA(id, &ae) + } + } + + for i := 0; i < b.Len(); i++ { + be := b.Index(i) + bk := getFinalValue(be) + + id := identifier(bk) + if id != nil { + c.addB(id, &be) + } + } + + return d.diffComparative(path, c) +} + +// keeps track of elements that have already been matched, to stop duplicate matches from occuring +type sliceTracker []bool + +func (st *sliceTracker) has(s, v reflect.Value) bool { + if len(*st) != s.Len() { + (*st) = make([]bool, s.Len(), s.Len()) + } + + for i := 0; i < s.Len(); i++ { + // skip already matched elements + if (*st)[i] { + continue + } + + x := s.Index(i) + if reflect.DeepEqual(x.Interface(), v.Interface()) { + (*st)[i] = true + return true + } + } + + return false +} + +func getFinalValue(t reflect.Value) reflect.Value { + switch t.Kind() { + case reflect.Interface: + return getFinalValue(t.Elem()) + case reflect.Ptr: + return getFinalValue(reflect.Indirect(t)) + default: + return t + } +} + +func hasAtSameIndex(s, v reflect.Value, atIndex int) bool { + // check the element in the slice at atIndex to see if it matches value, if it is a valid index into the slice + if atIndex < s.Len() { + x := s.Index(atIndex) + return reflect.DeepEqual(x.Interface(), v.Interface()) + } + + return false +} diff --git a/vendor/github.com/r3labs/diff/diff_string.go b/vendor/github.com/r3labs/diff/diff_string.go new file mode 100644 index 000000000..52dd91115 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_string.go @@ -0,0 +1,29 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +package diff + +import "reflect" + +func (d *Differ) diffString(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.String() != b.String() { + d.cl.add(UPDATE, path, a.String(), b.String()) + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_struct.go b/vendor/github.com/r3labs/diff/diff_struct.go new file mode 100644 index 000000000..16e7faab5 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_struct.go @@ -0,0 +1,105 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" + "time" +) + +func (d *Differ) diffStruct(path []string, a, b reflect.Value) error { + if areType(a, b, reflect.TypeOf(time.Time{})) { + return d.diffTime(path, a, b) + } + + if a.Kind() == reflect.Invalid { + if d.DisableStructValues { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + return d.structValues(CREATE, path, b) + } + + if b.Kind() == reflect.Invalid { + if d.DisableStructValues { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + return d.structValues(DELETE, path, a) + } + + for i := 0; i < a.NumField(); i++ { + field := a.Type().Field(i) + tname := tagName(field) + + if tname == "-" || hasTagOption(field, "immutable") { + continue + } + + if tname == "" { + tname = field.Name + } + + af := a.Field(i) + bf := b.FieldByName(field.Name) + + fpath := copyAppend(path, tname) + + err := d.diff(fpath, af, bf) + if err != nil { + return err + } + } + + return nil +} + +func (d *Differ) structValues(t string, path []string, a reflect.Value) error { + var nd Differ + + if t != CREATE && t != DELETE { + return ErrInvalidChangeType + } + + if a.Kind() == reflect.Ptr { + a = reflect.Indirect(a) + } + + if a.Kind() != reflect.Struct { + return ErrTypeMismatch + } + + x := reflect.New(a.Type()).Elem() + + for i := 0; i < a.NumField(); i++ { + + field := a.Type().Field(i) + tname := tagName(field) + + if tname == "-" { + continue + } + + if tname == "" { + tname = field.Name + } + + af := a.Field(i) + xf := x.FieldByName(field.Name) + + fpath := copyAppend(path, tname) + + err := nd.diff(fpath, xf, af) + if err != nil { + return err + } + } + + for i := 0; i < len(nd.cl); i++ { + (d.cl) = append(d.cl, swapChange(t, nd.cl[i])) + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_time.go b/vendor/github.com/r3labs/diff/diff_time.go new file mode 100644 index 000000000..bd88b8028 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_time.go @@ -0,0 +1,36 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" + "time" +) + +func (d *Differ) diffTime(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + // Marshal and unmarshal time type will lose accuracy. 
Using unix nano to compare time type. + au := a.Interface().(time.Time).UnixNano() + bu := b.Interface().(time.Time).UnixNano() + + if au != bu { + d.cl.add(UPDATE, path, a.Interface(), b.Interface()) + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/diff_uint.go b/vendor/github.com/r3labs/diff/diff_uint.go new file mode 100644 index 000000000..1f0edcb28 --- /dev/null +++ b/vendor/github.com/r3labs/diff/diff_uint.go @@ -0,0 +1,35 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import ( + "reflect" +) + +func (d *Differ) diffUint(path []string, a, b reflect.Value) error { + if a.Kind() == reflect.Invalid { + d.cl.add(CREATE, path, nil, b.Interface()) + return nil + } + + if b.Kind() == reflect.Invalid { + d.cl.add(DELETE, path, a.Interface(), nil) + return nil + } + + if a.Kind() != b.Kind() { + return ErrTypeMismatch + } + + if a.Uint() != b.Uint() { + if a.CanInterface() { + d.cl.add(UPDATE, path, a.Interface(), b.Interface()) + } else { + d.cl.add(UPDATE, path, a.Uint(), b.Uint()) + } + } + + return nil +} diff --git a/vendor/github.com/r3labs/diff/filter.go b/vendor/github.com/r3labs/diff/filter.go new file mode 100644 index 000000000..12e549a6a --- /dev/null +++ b/vendor/github.com/r3labs/diff/filter.go @@ -0,0 +1,22 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +package diff + +import "regexp" + +func pathmatch(filter, path []string) bool { + for i, f := range filter { + if len(path) < i+1 { + return false + } + + matched, _ := regexp.MatchString(f, path[i]) + if !matched { + return false + } + } + + return true +} diff --git a/vendor/github.com/r3labs/diff/options.go b/vendor/github.com/r3labs/diff/options.go new file mode 100644 index 000000000..627d2b8d5 --- /dev/null +++ b/vendor/github.com/r3labs/diff/options.go @@ -0,0 +1,18 @@ +package diff + +// SliceOrdering determines whether the ordering of items in a slice results in a change +func SliceOrdering(enabled bool) func(d *Differ) error { + return func(d *Differ) error { + d.SliceOrdering = enabled + return nil + } +} + +// DisableStructValues disables populating a seperate change for each item in a struct, +// where the struct is being compared to a nil value +func DisableStructValues() func(d *Differ) error { + return func(d *Differ) error { + d.DisableStructValues = true + return nil + } +} diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml new file mode 100644 index 000000000..8fc1261cb --- /dev/null +++ b/vendor/github.com/spf13/afero/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go + +go: + - 1.9 + - "1.10" + - tip + +os: + - linux + - osx + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build + - go test -race -v ./... + diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 000000000..298f0e266 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md new file mode 100644 index 000000000..0c9b04b53 --- /dev/null +++ b/vendor/github.com/spf13/afero/README.md @@ -0,0 +1,452 @@ +![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) + +A FileSystem Abstraction System for Go + +[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +# Overview + +Afero is an filesystem framework providing a simple, uniform and universal API +interacting with any filesystem, as an abstraction layer providing interfaces, +types and methods. Afero has an exceptionally clean interface and simple design +without needless constructors or initialization methods. 
+ +Afero is also a library providing a base set of interoperable backend +filesystems that make it easy to work with afero while retaining all the power +and benefit of the os and ioutil packages. + +Afero provides significant improvements over using the os package alone, most +notably the ability to create mock and testing filesystems without relying on the disk. + +It is suitable for use in a any situation where you would consider using the OS +package as it provides an additional abstraction that makes it easy to use a +memory backed file system during testing. It also adds support for the http +filesystem for full interoperability. + + +## Afero Features + +* A single consistent API for accessing a variety of filesystems +* Interoperation between a variety of file system types +* A set of interfaces to encourage and enforce interoperability between backends +* An atomic cross platform memory backed file system +* Support for compositional (union) file systems by combining multiple file systems acting as one +* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) +* A set of utility functions ported from io, ioutil & hugo to be afero aware + + +# Using Afero + +Afero is easy to use and easier to adopt. + +A few different ways you could use Afero: + +* Use the interfaces alone to define you own file system. +* Wrap for the OS packages. +* Define different filesystems for different parts of your application. +* Use Afero for mock filesystems while testing + +## Step 1: Install Afero + +First use go get to install the latest version of the library. + + $ go get github.com/spf13/afero + +Next include Afero in your application. +```go +import "github.com/spf13/afero" +``` + +## Step 2: Declare a backend + +First define a package variable and set it to a pointer to a filesystem. +```go +var AppFs = afero.NewMemMapFs() + +or + +var AppFs = afero.NewOsFs() +``` +It is important to note that if you repeat the composite literal you +will be using a completely new and isolated filesystem. In the case of +OsFs it will still use the same underlying filesystem but will reduce +the ability to drop in other filesystems as desired. + +## Step 3: Use it like you would the OS package + +Throughout your application use any function and method like you normally +would. + +So if my application before had: +```go +os.Open('/tmp/foo') +``` +We would replace it with: +```go +AppFs.Open('/tmp/foo') +``` + +`AppFs` being the variable we defined above. 
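A minimal end-to-end sketch tying Steps 1–3 together, using only calls shown in this README and in the vendored sources above (`afero.NewMemMapFs`, `afero.WriteFile`, and `Open` on the backend). Note that in real Go source the path is a double-quoted string literal, i.e. `AppFs.Open("/tmp/foo")`:

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

// AppFs is the package-level backend; swap in afero.NewOsFs() to hit the real disk.
var AppFs afero.Fs = afero.NewMemMapFs()

func main() {
	// Write through the abstraction rather than the os package.
	if err := afero.WriteFile(AppFs, "/tmp/foo", []byte("hello"), 0644); err != nil {
		panic(err)
	}

	// Read it back with the same Open call the README replaces os.Open with.
	f, err := AppFs.Open("/tmp/foo")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	buf := make([]byte, 16)
	n, _ := f.Read(buf)
	fmt.Println(string(buf[:n])) // hello
}
```

Because `AppFs` is typed as the `Fs` interface, switching the whole program from the in-memory backend to the OS backend is a one-line change to the variable's initializer.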
+ + +## List of all available functions + +File System Methods Available: +```go +Chmod(name string, mode os.FileMode) : error +Chtimes(name string, atime time.Time, mtime time.Time) : error +Create(name string) : File, error +Mkdir(name string, perm os.FileMode) : error +MkdirAll(path string, perm os.FileMode) : error +Name() : string +Open(name string) : File, error +OpenFile(name string, flag int, perm os.FileMode) : File, error +Remove(name string) : error +RemoveAll(path string) : error +Rename(oldname, newname string) : error +Stat(name string) : os.FileInfo, error +``` +File Interfaces and Methods Available: +```go +io.Closer +io.Reader +io.ReaderAt +io.Seeker +io.Writer +io.WriterAt + +Name() : string +Readdir(count int) : []os.FileInfo, error +Readdirnames(n int) : []string, error +Stat() : os.FileInfo, error +Sync() : error +Truncate(size int64) : error +WriteString(s string) : ret int, err error +``` +In some applications it may make sense to define a new package that +simply exports the file system variable for easy access from anywhere. + +## Using Afero's utility functions + +Afero provides a set of functions to make it easier to use the underlying file systems. +These functions have been primarily ported from io & ioutil with some developed for Hugo. + +The afero utilities support all afero compatible backends. + +The list of utilities includes: + +```go +DirExists(path string) (bool, error) +Exists(path string) (bool, error) +FileContainsBytes(filename string, subslice []byte) (bool, error) +GetTempDir(subPath string) string +IsDir(path string) (bool, error) +IsEmpty(path string) (bool, error) +ReadDir(dirname string) ([]os.FileInfo, error) +ReadFile(filename string) ([]byte, error) +SafeWriteReader(path string, r io.Reader) (err error) +TempDir(dir, prefix string) (name string, err error) +TempFile(dir, prefix string) (f File, err error) +Walk(root string, walkFn filepath.WalkFunc) error +WriteFile(filename string, data []byte, perm os.FileMode) error +WriteReader(path string, r io.Reader) (err error) +``` +For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero) + +They are available under two different approaches to use. You can either call +them directly where the first parameter of each function will be the file +system, or you can declare a new `Afero`, a custom type used to bind these +functions as methods to a given filesystem. + +### Calling utilities directly + +```go +fs := new(afero.MemMapFs) +f, err := afero.TempFile(fs,"", "ioutil-test") + +``` + +### Calling via Afero + +```go +fs := afero.NewMemMapFs() +afs := &afero.Afero{Fs: fs} +f, err := afs.TempFile("", "ioutil-test") +``` + +## Using Afero for Testing + +There is a large benefit to using a mock filesystem for testing. It has a +completely blank state every time it is initialized and can be easily +reproducible regardless of OS. You could create files to your heart’s content +and the file access would be fast while also saving you from all the annoying +issues with deleting temporary files, Windows file locking, etc. The MemMapFs +backend is perfect for testing. + +* Much faster than performing I/O operations on disk +* Avoid security issues and permissions +* Far more control. 'rm -rf /' with confidence +* Test setup is far more easier to do +* No test cleanup needed + +One way to accomplish this is to define a variable as mentioned above. +In your application this will be set to afero.NewOsFs() during testing you +can set it to afero.NewMemMapFs(). 
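A minimal sketch of that swap, assuming a hypothetical `writeGreeting` helper: the application declares the backend once as a package variable set to the OS filesystem, and each test reassigns it to a fresh MemMapFs. The `TestExist` example below shows the same idea with a test-local variable.

```go
package app

import "github.com/spf13/afero"

// appFS is the single place the backend is chosen; production keeps the OS filesystem.
var appFS afero.Fs = afero.NewOsFs()

// writeGreeting is a hypothetical helper that performs all I/O through the abstraction.
func writeGreeting(path string) error {
	return afero.WriteFile(appFS, path, []byte("hello"), 0644)
}
```

The corresponding test swaps the backend before exercising the helper:

```go
package app

import (
	"testing"

	"github.com/spf13/afero"
)

func TestWriteGreeting(t *testing.T) {
	appFS = afero.NewMemMapFs() // blank in-memory state for this test only

	if err := writeGreeting("/tmp/greeting.txt"); err != nil {
		t.Fatal(err)
	}
	if ok, _ := afero.Exists(appFS, "/tmp/greeting.txt"); !ok {
		t.Error("expected /tmp/greeting.txt to exist in the MemMapFs")
	}
}
```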
+ +It wouldn't be uncommon to have each test initialize a blank slate memory +backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere +appropriate in my application code. This approach ensures that Tests are order +independent, with no test relying on the state left by an earlier test. + +Then in my tests I would initialize a new MemMapFs for each test: +```go +func TestExist(t *testing.T) { + appFS := afero.NewMemMapFs() + // create test files and directories + appFS.MkdirAll("src/a", 0755) + afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) + afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) + name := "src/c" + _, err := appFS.Stat(name) + if os.IsNotExist(err) { + t.Errorf("file \"%s\" does not exist.\n", name) + } +} +``` + +# Available Backends + +## Operating System Native + +### OsFs + +The first is simply a wrapper around the native OS calls. This makes it +very easy to use as all of the calls are the same as the existing OS +calls. It also makes it trivial to have your code use the OS during +operation and a mock filesystem during testing or as needed. + +```go +appfs := afero.NewOsFs() +appfs.MkdirAll("src/a", 0755)) +``` + +## Memory Backed Storage + +### MemMapFs + +Afero also provides a fully atomic memory backed filesystem perfect for use in +mocking and to speed up unnecessary disk io when persistence isn’t +necessary. It is fully concurrent and will work within go routines +safely. + +```go +mm := afero.NewMemMapFs() +mm.MkdirAll("src/a", 0755)) +``` + +#### InMemoryFile + +As part of MemMapFs, Afero also provides an atomic, fully concurrent memory +backed file implementation. This can be used in other memory backed file +systems with ease. Plans are to add a radix tree memory stored file +system using InMemoryFile. + +## Network Interfaces + +### SftpFs + +Afero has experimental support for secure file transfer protocol (sftp). Which can +be used to perform file operations over a encrypted channel. + +## Filtering Backends + +### BasePathFs + +The BasePathFs restricts all operations to a given path within an Fs. +The given file name to the operations on this Fs will be prepended with +the base path before calling the source Fs. + +```go +bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") +``` + +### ReadOnlyFs + +A thin wrapper around the source Fs providing a read only view. + +```go +fs := afero.NewReadOnlyFs(afero.NewOsFs()) +_, err := fs.Create("/file.txt") +// err = syscall.EPERM +``` + +# RegexpFs + +A filtered view on file names, any file NOT matching +the passed regexp will be treated as non-existing. +Files not matching the regexp provided will not be created. +Directories are not filtered. + +```go +fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) +_, err := fs.Create("/file.html") +// err = syscall.ENOENT +``` + +### HttpFs + +Afero provides an http compatible backend which can wrap any of the existing +backends. + +The Http package requires a slightly specific version of Open which +returns an http.File type. + +Afero provides an httpFs file system which satisfies this requirement. +Any Afero FileSystem can be used as an httpFs. + +```go +httpFs := afero.NewHttpFs() +fileserver := http.FileServer(httpFs.Dir())) +http.Handle("/", fileserver) +``` + +## Composite Backends + +Afero provides the ability have two filesystems (or more) act as a single +file system. + +### CacheOnReadFs + +The CacheOnReadFs will lazily make copies of any accessed files from the base +layer into the overlay. 
Subsequent reads will be pulled from the overlay +directly permitting the request is within the cache duration of when it was +created in the overlay. + +If the base filesystem is writeable, any changes to files will be +done first to the base, then to the overlay layer. Write calls to open file +handles like `Write()` or `Truncate()` to the overlay first. + +To writing files to the overlay only, you can use the overlay Fs directly (not +via the union Fs). + +Cache files in the layer for the given time.Duration, a cache duration of 0 +means "forever" meaning the file will not be re-requested from the base ever. + +A read-only base will make the overlay also read-only but still copy files +from the base to the overlay when they're not present (or outdated) in the +caching layer. + +```go +base := afero.NewOsFs() +layer := afero.NewMemMapFs() +ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) +``` + +### CopyOnWriteFs() + +The CopyOnWriteFs is a read only base file system with a potentially +writeable layer on top. + +Read operations will first look in the overlay and if not found there, will +serve the file from the base. + +Changes to the file system will only be made in the overlay. + +Any attempt to modify a file found only in the base will copy the file to the +overlay layer before modification (including opening a file with a writable +handle). + +Removing and Renaming files present only in the base layer is not currently +permitted. If a file is present in the base layer and the overlay, only the +overlay will be removed/renamed. + +```go + base := afero.NewOsFs() + roBase := afero.NewReadOnlyFs(base) + ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) + + fh, _ = ufs.Create("/home/test/file2.txt") + fh.WriteString("This is a test") + fh.Close() +``` + +In this example all write operations will only occur in memory (MemMapFs) +leaving the base filesystem (OsFs) untouched. + + +## Desired/possible backends + +The following is a short list of possible backends we hope someone will +implement: + +* SSH +* ZIP +* TAR +* S3 + +# About the project + +## What's in the name + +Afero comes from the latin roots Ad-Facere. + +**"Ad"** is a prefix meaning "to". + +**"Facere"** is a form of the root "faciō" making "make or do". + +The literal meaning of afero is "to make" or "to do" which seems very fitting +for a library that allows one to make files and directories and do things with them. + +The English word that shares the same roots as Afero is "affair". Affair shares +the same concept but as a noun it means "something that is made or done" or "an +object of a particular type". + +It's also nice that unlike some of my other libraries (hugo, cobra, viper) it +Googles very well. 
+ +## Release Notes + +* **0.10.0** 2015.12.10 + * Full compatibility with Windows + * Introduction of afero utilities + * Test suite rewritten to work cross platform + * Normalize paths for MemMapFs + * Adding Sync to the file interface + * **Breaking Change** Walk and ReadDir have changed parameter order + * Moving types used by MemMapFs to a subpackage + * General bugfixes and improvements +* **0.9.0** 2015.11.05 + * New Walk function similar to filepath.Walk + * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC + * MemMapFs.Remove now really deletes the file + * InMemoryFile.Readdir and Readdirnames work correctly + * InMemoryFile functions lock it for concurrent access + * Test suite improvements +* **0.8.0** 2014.10.28 + * First public version + * Interfaces feel ready for people to build using + * Interfaces satisfy all known uses + * MemMapFs passes the majority of the OS test suite + * OsFs passes the majority of the OS test suite + +## Contributing + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Add some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Contributors + +Names in no particular order: + +* [spf13](https://github.com/spf13) +* [jaqx0r](https://github.com/jaqx0r) +* [mbertschler](https://github.com/mbertschler) +* [xor-gate](https://github.com/xor-gate) + +## License + +Afero is released under the Apache 2.0 license. See +[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go new file mode 100644 index 000000000..f5b5e127c --- /dev/null +++ b/vendor/github.com/spf13/afero/afero.go @@ -0,0 +1,108 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. 
+ Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 000000000..a633ad500 --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,15 @@ +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + + go env + + go get -v github.com/spf13/afero/... + + go build github.com/spf13/afero +test_script: +- cmd: go test -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 000000000..616ff8ff7 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,180 @@ +package afero + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var _ Lstater = (*BasePathFs)(nil) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. 
+type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. + if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return 
&os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 000000000..29a26c67d --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,290 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. 
+type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + 
case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 000000000..5728243d9 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 000000000..968fc2783 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 000000000..e8108a851 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,293 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. 
+func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/go.mod b/vendor/github.com/spf13/afero/go.mod new file mode 100644 index 000000000..086855099 --- /dev/null +++ b/vendor/github.com/spf13/afero/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/afero + +require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/spf13/afero/go.sum b/vendor/github.com/spf13/afero/go.sum new file mode 100644 index 000000000..6bad37b2a --- /dev/null +++ b/vendor/github.com/spf13/afero/go.sum @@ -0,0 +1,2 @@ +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 000000000..c42193688 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 000000000..5c3a3d8ff --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. 
Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. 
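These helpers mirror io/ioutil but run against any Fs, which is what makes them useful in tests. A minimal sketch against MemMapFs, assuming the vendored import path github.com/spf13/afero; file names and contents are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()

	// WriteFile / ReadFile behave like their io/ioutil counterparts.
	if err := afero.WriteFile(fs, "/notes.txt", []byte("hello afero"), 0644); err != nil {
		log.Fatal(err)
	}
	data, err := afero.ReadFile(fs, "/notes.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // hello afero

	// TempFile in this vendored version takes (dir, prefix), like the old io/ioutil API.
	tmp, err := afero.TempFile(fs, "", "example")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temp file:", tmp.Name())
	tmp.Close()
}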
+// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go new file mode 100644 index 000000000..89c1bfc0a --- /dev/null +++ b/vendor/github.com/spf13/afero/lstater.go @@ -0,0 +1,27 @@ +// Copyright © 2018 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" +) + +// Lstater is an optional interface in Afero. It is only implemented by the +// filesystems saying so. +// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. +// Else it will call Stat. +// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. +type Lstater interface { + LstatIfPossible(name string) (os.FileInfo, bool, error) +} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go new file mode 100644 index 000000000..c18a87fb7 --- /dev/null +++ b/vendor/github.com/spf13/afero/match.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2009 The Go Authors. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "path/filepath" + "sort" + "strings" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// This was adapted from (http://golang.org/pkg/path/filepath) and uses several +// built-ins from that package. +func Glob(fs Fs, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + // Lstat not supported by a ll filesystems. + if _, err = lstatIfPossible(fs, pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + switch dir { + case "": + dir = "." + case string(filepath.Separator): + // nothing + default: + dir = dir[0 : len(dir)-1] // chop off trailing separator + } + + if !hasMeta(dir) { + return glob(fs, dir, file, nil) + } + + var m []string + m, err = Glob(fs, dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + d, err := fs.Open(dir) + if err != nil { + return + } + defer d.Close() + + names, _ := d.Readdirnames(-1) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.IndexAny(path, "*?[") >= 0 +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 000000000..e104013f4 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 000000000..03a57ee5b --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 000000000..7af2fb56f --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,317 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return 
f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + setModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 000000000..09498e70f --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,365 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (*MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + + parent.Lock() + mem.RemoveFromMemDir(parent, f) + parent.Unlock() + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + parent.Lock() + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) + parent.Unlock() +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{FileData: x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + + m.Chmod(name, perm|os.ModeDir) + + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + chmod := false + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + m.Chmod(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := 
m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} + +// func debugMemMapList(fs Fs) { +// if x, ok := fs.(*MemMapFs); ok { +// x.List() +// } +// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 000000000..13cc1b84c --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,101 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
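OsFs and MemMapFs satisfy the same Fs interface, so code written against Fs can hit the real disk in production and stay in memory under test. A sketch of that pattern, assuming the Afero wrapper type whose methods appear throughout these files (the type itself lives in the vendored afero.go, outside this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

// save writes a marker file through whatever backend it is handed.
func save(fs afero.Fs, path string) error {
	return afero.WriteFile(fs, path, []byte("ok"), 0644)
}

func main() {
	// Under test: an in-memory filesystem, no disk I/O.
	memFs := afero.NewMemMapFs()
	if err := save(memFs, "/state/marker"); err != nil {
		log.Fatal(err)
	}

	// The Afero wrapper exposes the package-level helpers as methods.
	a := afero.Afero{Fs: memFs}
	ok, err := a.Exists("/state/marker")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("exists:", ok)

	// In production, only the constructor changes:
	// _ = save(afero.NewOsFs(), "/tmp/marker")
}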
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 000000000..18f60a0f6 --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
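Walk behaves like filepath.Walk but descends an arbitrary Fs, using Lstat only when the backend supports it. A minimal sketch over MemMapFs; the directory layout is illustrative:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	for _, p := range []string{"/srv/a.txt", "/srv/sub/b.txt"} {
		if err := afero.WriteFile(fs, p, []byte("x"), 0644); err != nil {
			log.Fatal(err)
		}
	}

	// Visit every file and directory under /srv in lexical order.
	err := afero.Walk(fs, "/srv", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, info.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}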
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 000000000..c6376ec37 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,80 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 000000000..9d92dbc05 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,214 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
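ReadOnlyFs and RegexpFs are thin decorators over another Fs: the first rejects every mutating call with EPERM, the second hides files whose names do not match the pattern. A sketch of both, with illustrative file names:

package main

import (
	"fmt"
	"regexp"

	"github.com/spf13/afero"
)

func main() {
	base := afero.NewMemMapFs()

	// Only *.txt files are visible or creatable through the filter.
	filtered := afero.NewRegexpFs(base, regexp.MustCompile(`\.txt$`))
	if _, err := filtered.Create("/notes.txt"); err != nil {
		fmt.Println("unexpected:", err)
	}
	if _, err := filtered.Create("/image.png"); err != nil {
		fmt.Println("blocked by the filter:", err) // ENOENT
	}

	// The read-only wrapper refuses writes regardless of the underlying Fs.
	ro := afero.NewReadOnlyFs(base)
	if err := ro.Mkdir("/new", 0755); err != nil {
		fmt.Println("read-only:", err) // EPERM
	}
}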
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 000000000..eda96312d --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,320 @@ +package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
+ _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. +type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + var files = make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil + +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) 
+ } + + if c <= 0 && len(f.files) == 0 { + return f.files, nil + } + + if f.off >= len(f.files) { + return nil, io.EOF + } + + if c <= 0 { + return f.files[f.off:], nil + } + + if c > len(f.files) { + c = len(f.files) + } + + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 000000000..4f253f481 --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,330 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. 
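The UnionFile and copyToLayer machinery above backs the copy-on-write overlay whose Mkdir/MkdirAll/Create methods opened this hunk: reads fall through to the base layer until a write triggers a copy-up into the overlay. A sketch of that behaviour, assuming the NewCopyOnWriteFs constructor defined earlier in the vendored copyOnWriteFs.go (outside this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	// Base layer: pretend this is a read-only source tree.
	base := afero.NewMemMapFs()
	if err := afero.WriteFile(base, "/config.yaml", []byte("from base"), 0644); err != nil {
		log.Fatal(err)
	}

	// Writes land in the layer; the base stays untouched.
	layer := afero.NewMemMapFs()
	ufs := afero.NewCopyOnWriteFs(base, layer)
	if err := afero.WriteFile(ufs, "/config.yaml", []byte("overridden"), 0644); err != nil {
		log.Fatal(err)
	}

	got, _ := afero.ReadFile(ufs, "/config.yaml")
	orig, _ := afero.ReadFile(base, "/config.yaml")
	fmt.Println(string(got))  // overridden
	fmt.Println(string(orig)) // from base
}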
+const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + return err + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. +func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { + return SafeWriteReader(a.Fs, path, r) +} + +func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + return + } + } + + exists, err := Exists(fs, path) + if err != nil { + return + } + if exists { + return fmt.Errorf("%v already exists", path) + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +func (a Afero) GetTempDir(subPath string) string { + return GetTempDir(a.Fs, subPath) +} + +// GetTempDir returns the default temp directory with trailing slash +// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx +func GetTempDir(fs Fs, subPath string) string { + addSlash := func(p string) string { + if FilePathSeparator != p[len(p)-1:] { + p = p + FilePathSeparator + } + return p + } + dir := addSlash(os.TempDir()) + + if subPath != "" { + // preserve windows backslash :-( + if FilePathSeparator == "\\" { + subPath = strings.Replace(subPath, "\\", "____", -1) + } + dir = dir + UnicodeSanitize((subPath)) + if FilePathSeparator == "\\" { + dir = strings.Replace(dir, "____", "\\", -1) + } + + if exists, _ := Exists(fs, dir); exists { + return addSlash(dir) + } + + err := fs.MkdirAll(dir, 0777) + if err != nil { + panic(err) + } + dir = addSlash(dir) + } + return dir +} + +// Rewrite string to remove non-standard path characters +func UnicodeSanitize(s string) string { + source := []rune(s) + target := make([]rune, 0, len(source)) + + for _, r := range source { + if unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsMark(r) || + r == '.' || + r == '/' || + r == '\\' || + r == '_' || + r == '-' || + r == '%' || + r == ' ' || + r == '#' { + target = append(target, r) + } + } + + return string(target) +} + +// Transform characters with accents into plain forms. +func NeuterAccents(s string) string { + t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + result, _, _ := transform.String(t, string(s)) + + return result +} + +func isMn(r rune) bool { + return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks +} + +func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { + return FileContainsBytes(a.Fs, filename, subslice) +} + +// Check if a file contains a specified byte slice. 
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslice), nil +} + +func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { + return FileContainsAnyBytes(a.Fs, filename, subslices) +} + +// Check if a file contains any of the specified byte slices. +func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { + f, err := fs.Open(filename) + if err != nil { + return false, err + } + defer f.Close() + + return readerContainsAny(f, subslices...), nil +} + +// readerContains reports whether any of the subslices is within r. +func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. 
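The predicates in util.go (Exists, DirExists, IsDir, IsEmpty, FileContainsBytes) are small conveniences over Stat and Open. A sketch of a few of them against MemMapFs; the path and contents are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := afero.WriteFile(fs, "/logs/app.log", []byte("level=error msg=boom"), 0644); err != nil {
		log.Fatal(err)
	}

	dirOK, _ := afero.DirExists(fs, "/logs")       // true: WriteFile created the parent
	empty, _ := afero.IsEmpty(fs, "/logs/app.log") // false: the file has content
	hasErr, _ := afero.FileContainsBytes(fs, "/logs/app.log", []byte("level=error"))

	fmt.Println(dirOK, empty, hasErr) // true false true
}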
+func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 000000000..f38ec5956 --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 000000000..aa1c2b95c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,484 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return DirExists(t, path, append([]interface{}{msg}, args...)...) 
+} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) 
+} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. 
+// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. 
+// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. 
+// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. 
+// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 000000000..d2bb0b817 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + if h, ok := t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 000000000..de39f794e --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,956 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Containsf(a.t, s, contains, msg, args...) 
+} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. 
not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Errorf(a.t, err, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Fail(a.t, failureMessage, msgAndArgs...) 
+} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExistsf(a.t, path, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPError(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPErrorf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...) 
+} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPSuccessf(a.t, handler, method, url, values, msg, args...) +} + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Implementsf(a.t, interfaceObject, object, msg, args...) +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaSlicef(a.t, expected, actual, delta, msg, args...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) +func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InDeltaf(a.t, expected, actual, delta, msg, args...) 
+} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return InEpsilonf(a.t, expected, actual, epsilon, msg, args...) +} + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsType(a.t, expectedType, object, msgAndArgs...) +} + +// IsTypef asserts that the specified objects are of the same type. +func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsTypef(a.t, expectedType, object, msg, args...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return JSONEqf(a.t, expected, actual, msg, args...) +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3) +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Len(a.t, object, length, msgAndArgs...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// a.Lenf(mySlice, 3, "error message %s", "formatted") +func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lenf(a.t, object, length, msg, args...) +} + +// Nil asserts that the specified object is nil. 
+// +// a.Nil(err) +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nil(a.t, object, msgAndArgs...) +} + +// Nilf asserts that the specified object is nil. +// +// a.Nilf(err, "error message %s", "formatted") +func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Nilf(a.t, object, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoErrorf(a.t, err, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmptyf(obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmptyf(a.t, object, msg, args...) +} + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). 
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// a.NotEqualf(obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEqualf(a.t, expected, actual, msg, args...) +} + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err) +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNil(a.t, object, msgAndArgs...) +} + +// NotNilf asserts that the specified object is not nil. +// +// a.NotNilf(err, "error message %s", "formatted") +func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotNilf(a.t, object, msg, args...) +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ RemainCalm() }) +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanics(a.t, f, msgAndArgs...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") +func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotPanicsf(a.t, f, msg, args...) +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") +func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotRegexpf(a.t, rx, str, msg, args...) +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). 
+// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Panicsf(a.t, f, msg, args...) +} + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexp(a.t, rx, str, msgAndArgs...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") +func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Regexpf(a.t, rx, str, msg, args...) +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subset(a.t, list, subset, msgAndArgs...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zerof(a.t, i, msg, args...) 
+} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 000000000..188bb9e17 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + if h, ok := a.t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 000000000..9bd4a80e4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1416 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. +type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool + +// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. +type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool + +// Comparison a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + if expected == nil || actual == nil { + return expected == actual + } + + exp, ok := expected.([]byte) + if !ok { + return reflect.DeepEqual(expected, actual) + } + + act, ok := actual.([]byte) + if !ok { + return false + } + if exp == nil || act == nil { + return exp == nil && act == nil + } + return bytes.Equal(exp, act) +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. 
+func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed. +func CallerInfo() []string { + + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" + + callers := []string{} + for i := 0; ; i++ { + pc, file, line, ok = runtime.Caller(i) + if !ok { + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. + break + } + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break + } + + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + file = parts[len(parts)-1] + if len(parts) > 1 { + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + segments := strings.Split(name, ".") + name = segments[len(segments)-1] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + } + + return callers +} + +// Stolen from the `go test` tool. +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { + if len(msgAndArgs) == 0 || msgAndArgs == nil { + return "" + } + if len(msgAndArgs) == 1 { + msg := msgAndArgs[0] + if msgAsStr, ok := msg.(string); ok { + return msgAsStr + } + return fmt.Sprintf("%+v", msg) + } + if len(msgAndArgs) > 1 { + return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) + } + return "" +} + +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). 
+func indentMessageLines(message string, longestLabelLen int) string { + outBuf := new(bytes.Buffer) + + for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) + if i != 0 { + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") + } + outBuf.WriteString(scanner.Text()) + } + + return outBuf.String() +} + +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. + // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, + {"Error", failureMessage}, + } + + // Add test name if the Go version supports it + if n, ok := t.(interface { + Name() string + }); ok { + content = append(content, labeledContent{"Test", n.Name()}) + } + + message := messageFromMsgAndArgs(msgAndArgs...) + if len(message) > 0 { + content = append(content, labeledContent{"Messages", message}) + } + + t.Errorf("\n%s", ""+labeledOutput(content...)) + + return false +} + +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. +func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) 
+ } + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqualValues(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + aType := reflect.TypeOf(expected) + bType := reflect.TypeOf(actual) + + if aType != bType { + return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) + } + + return Equal(t, expected, actual, msgAndArgs...) + +} + +// NotNil asserts that the specified object is not nil. 
+// +// assert.NotNil(t, err) +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !isNil(object) { + return true + } + return Fail(t, "Expected value not to be nil.", msgAndArgs...) +} + +// containsKind checks if a specified kind in the slice of kinds. +func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { + for i := 0; i < len(kinds); i++ { + if kind == kinds[i] { + return true + } + } + + return false +} + +// isNil checks if a specified object is nil or not, without Failing. +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + isNilableKind := containsKind( + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice}, + kind) + + if isNilableKind && value.IsNil() { + return true + } + + return false +} + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err) +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isNil(object) { + return true + } + return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) +} + +// isEmpty gets whether the specified object is considered empty or not. +func isEmpty(object interface{}) bool { + + // get nil case out of the way + if object == nil { + return true + } + + objValue := reflect.ValueOf(object) + + switch objValue.Kind() { + // collection types are empty when they have no element + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + return objValue.Len() == 0 + // pointers are empty if nil or if the value they point to is empty + case reflect.Ptr: + if objValue.IsNil() { + return true + } + deref := objValue.Elem().Interface() + return isEmpty(deref) + // for all other types, compare against the zero value + default: + zero := reflect.Zero(objValue.Type()) + return reflect.DeepEqual(object, zero.Interface()) + } +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + pass := !isEmpty(object) + if !pass { + Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) + } + + return pass + +} + +// getLen try to get length of object. +// return (false, 0) if impossible. +func getLen(x interface{}) (ok bool, length int) { + v := reflect.ValueOf(x) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + return true, v.Len() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. 
+// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + ok, l := getLen(object) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + } + + if l != length { + return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + } + return true +} + +// True asserts that the specified value is true. +// +// assert.True(t, myBool) +func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + + if value != true { + return Fail(t, "Should be true", msgAndArgs...) + } + + return true + +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if value != false { + return Fail(t, "Should be false", msgAndArgs...) + } + + return true + +} + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if ObjectsAreEqual(expected, actual) { + return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) + } + + return true + +} + +// containsElement try loop over the list check if the list includes the element. +// return (false, false) if impossible. +// return (true, false) if element was not found. +// return (true, true) if element was found. +func includeElement(list interface{}, element interface{}) (ok, found bool) { + + listValue := reflect.ValueOf(list) + elementValue := reflect.ValueOf(element) + defer func() { + if e := recover(); e != nil { + ok = false + found = false + } + }() + + if reflect.TypeOf(list).Kind() == reflect.String { + return true, strings.Contains(listValue.String(), elementValue.String()) + } + + if reflect.TypeOf(list).Kind() == reflect.Map { + mapKeys := listValue.MapKeys() + for i := 0; i < len(mapKeys); i++ { + if ObjectsAreEqual(mapKeys[i].Interface(), element) { + return true, true + } + } + return true, false + } + + for i := 0; i < listValue.Len(); i++ { + if ObjectsAreEqual(listValue.Index(i).Interface(), element) { + return true, true + } + } + return true, false + +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) 
+ } + + return true + +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth") +// assert.NotContains(t, ["Hello", "World"], "Earth") +// assert.NotContains(t, {"Hello": "World"}, "Earth") +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Subset asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). +// +// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return true // we consider nil to be equal to the nil set + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + } + } + + return true +} + +// NotSubset asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if subset == nil { + return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + } + + subsetValue := reflect.ValueOf(subset) + defer func() { + if e := recover(); e != nil { + ok = false + } + }() + + listKind := reflect.TypeOf(list).Kind() + subsetKind := reflect.TypeOf(subset).Kind() + + if listKind != reflect.Array && listKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) + } + + if subsetKind != reflect.Array && subsetKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) + } + + for i := 0; i < subsetValue.Len(); i++ { + element := subsetValue.Index(i).Interface() + ok, found := includeElement(list, element) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + } + if !found { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) 
+} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return true + } + + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() + + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) + } + + if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) + } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) 
+ } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) + if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. 
Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) + } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). 
+// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. 
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + if et != reflect.TypeOf("") { + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) + } else { + e = expected.(string) + a = actual.(string) + } + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. 
+func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 000000000..c9dccc4d6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 000000000..ac9dc9d1d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 000000000..9ad56851d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. 
+func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 000000000..df46fa777 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,143 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url, nil) + if err != nil { + return -1, err + } + req.URL.RawQuery = values.Encode() + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). 
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if !contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return contains +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + body := HTTPBody(handler, method, url, values) + + contains := strings.Contains(body, fmt.Sprint(str)) + if contains { + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + } + + return !contains +} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 000000000..e3c2fc2f1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,25 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 000000000..58ebdc162 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 000000000..0a2dc8284 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,73 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API. + +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` + +## Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. + + $ gxz -d bigfile.xz + diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md new file mode 100644 index 000000000..c10e51b9a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -0,0 +1,319 @@ +# TODO list + +## Release v0.6 + +1. 
Review encoder and check for lzma improvements under xz. +2. Fix binary tree matcher. +3. Compare compression ratio with xz tool using comparable parameters + and optimize parameters +4. Do some optimizations + - rename operation action and make it a simple type of size 8 + - make maxMatches, wordSize parameters + - stop searching after a certain length is found (parameter sweetLen) + +## Release v0.7 + +1. Optimize code +2. Do statistical analysis to get linear presets. +3. Test sync.Pool compatability for xz and lzma Writer and Reader +3. Fuzz optimized code. + +## Release v0.8 + +1. Support parallel go routines for writing and reading xz files. +2. Support a ReaderAt interface for xz files with small block sizes. +3. Improve compatibility between gxz and xz +4. Provide manual page for gxz + +## Release v0.9 + +1. Improve documentation +2. Fuzz again + +## Release v1.0 + +1. Full functioning gxz +2. Add godoc URL to README.md (godoc.org) +3. Resolve all issues. +4. Define release candidates. +5. Public announcement. + +## Package lzma + +### Release v0.6 + +- Rewrite Encoder into a simple greedy one-op-at-a-time encoder + including + + simple scan at the dictionary head for the same byte + + use the killer byte (requiring matches to get longer, the first + test should be the byte that would make the match longer) + + +## Optimizations + +- There may be a lot of false sharing in lzma.State; check whether this + can be improved by reorganizing the internal structure of it. +- Check whether batching encoding and decoding improves speed. + +### DAG optimizations + +- Use full buffer to create minimal bit-length above range encoder. +- Might be too slow (see v0.4) + +### Different match finders + +- hashes with 2, 3 characters additional to 4 characters +- binary trees with 2-7 characters (uint64 as key, use uint32 as + pointers into a an array) +- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers + into an array with bit-steeling for the colors) + +## Release Procedure + +- execute goch -l for all packages; probably with lower param like 0.5. +- check orthography with gospell +- Write release notes in doc/relnotes. +- Update README.md +- xb copyright . in xz directory to ensure all new files have Copyright + header +- VERSION= go generate github.com/ulikunitz/xz/... to update + version files +- Execute test for Linux/amd64, Linux/x86 and Windows/amd64. +- Update TODO.md - write short log entry +- git checkout master && git merge dev +- git tag -a +- git push + +## Log + +### 2018-10-28 + +Release v0.5.5 fixes issues #19 observing ErrLimit outputs. + +### 2017-06-05 + +Release v0.5.4 fixes issues #15 of another problem with the padding size +check for the xz block header. I removed the check completely. + +### 2017-02-15 + +Release v0.5.3 fixes issue #12 regarding the decompression of an empty +XZ stream. Many thanks to Tomasz Kłak, who reported the issue. + +### 2016-12-02 + +Release v0.5.2 became necessary to allow the decoding of xz files with +4-byte padding in the block header. Many thanks to Greg, who reported +the issue. + +### 2016-07-23 + +Release v0.5.1 became necessary to fix problems with 32-bit platforms. +Many thanks to Bruno Brigas, who reported the issue. + +### 2016-07-04 + +Release v0.5 provides improvements to the compressor and provides support for +the decompression of xz files with multiple xz streams. + +### 2016-01-31 + +Another compression rate increase by checking the byte at length of the +best match first, before checking the whole prefix. 
This makes the +compressor even faster. We have now a large time budget to beat the +compression ratio of the xz tool. For enwik8 we have now over 40 seconds +to reduce the compressed file size for another 7 MiB. + +### 2016-01-30 + +I simplified the encoder. Speed and compression rate increased +dramatically. A high compression rate affects also the decompression +speed. The approach with the buffer and optimizing for operation +compression rate has not been successful. Going for the maximum length +appears to be the best approach. + +### 2016-01-28 + +The release v0.4 is ready. It provides a working xz implementation, +which is rather slow, but works and is interoperable with the xz tool. +It is an important milestone. + +### 2016-01-10 + +I have the first working implementation of an xz reader and writer. I'm +happy about reaching this milestone. + +### 2015-12-02 + +I'm now ready to implement xz because, I have a working LZMA2 +implementation. I decided today that v0.4 will use the slow encoder +using the operations buffer to be able to go back, if I intend to do so. + +### 2015-10-21 + +I have restarted the work on the library. While trying to implement +LZMA2, I discovered that I need to resimplify the encoder and decoder +functions. The option approach is too complicated. Using a limited byte +writer and not caring for written bytes at all and not to try to handle +uncompressed data simplifies the LZMA encoder and decoder much. +Processing uncompressed data and handling limits is a feature of the +LZMA2 format not of LZMA. + +I learned an interesting method from the LZO format. If the last copy is +too far away they are moving the head one 2 bytes and not 1 byte to +reduce processing times. + +### 2015-08-26 + +I have now reimplemented the lzma package. The code is reasonably fast, +but can still be optimized. The next step is to implement LZMA2 and then +xz. + +### 2015-07-05 + +Created release v0.3. The version is the foundation for a full xz +implementation that is the target of v0.4. + +### 2015-06-11 + +The gflag package has been developed because I couldn't use flag and +pflag for a fully compatible support of gzip's and lzma's options. It +seems to work now quite nicely. + +### 2015-06-05 + +The overflow issue was interesting to research, however Henry S. Warren +Jr. Hacker's Delight book was very helpful as usual and had the issue +explained perfectly. Fefe's information on his website was based on the +C FAQ and quite bad, because it didn't address the issue of -MININT == +MININT. + +### 2015-06-04 + +It has been a productive day. I improved the interface of lzma.Reader +and lzma.Writer and fixed the error handling. + +### 2015-06-01 + +By computing the bit length of the LZMA operations I was able to +improve the greedy algorithm implementation. By using an 8 MByte buffer +the compression rate was not as good as for xz but already better then +gzip default. + +Compression is currently slow, but this is something we will be able to +improve over time. + +### 2015-05-26 + +Checked the license of ogier/pflag. The binary lzmago binary should +include the license terms for the pflag library. + +I added the endorsement clause as used by Google for the Go sources the +LICENSE file. + +### 2015-05-22 + +The package lzb contains now the basic implementation for creating or +reading LZMA byte streams. It allows the support for the implementation +of the DAG-shortest-path algorithm for the compression function. + +### 2015-04-23 + +Completed yesterday the lzbase classes. 
I'm a little bit concerned that +using the components may require too much code, but on the other hand +there is a lot of flexibility. + +### 2015-04-22 + +Implemented Reader and Writer during the Bayern game against Porto. The +second half gave me enough time. + +### 2015-04-21 + +While showering this morning I discovered that the design for OpEncoder +and OpDecoder doesn't work, because encoding/decoding might depend on +the current status of the dictionary. This is not exactly the right way +to start the day. + +Therefore we need to keep the Reader and Writer design. This time around +we simplify it by ignoring size limits. These can be added by wrappers +around the Reader and Writer interfaces. The Parameters type isn't +needed anymore. + +However, I will implement a ReaderState and WriterState type to use +static typing to ensure the right State object is combined with the +right lzbase.Reader and lzbase.Writer. + +As a start I have implemented ReaderState and WriterState to ensure +that the state for reading is only used by readers and WriterState is only +used by writers. + +### 2015-04-20 + +Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. + +### 2015-04-08 + +Came up with a new simplified design for lzbase. I have already implemented +the type State that replaces OpCodec. + +### 2015-04-06 + +The new lzma package is now fully usable and lzmago is using it. The +old lzma package has been completely removed. + +### 2015-04-05 + +Implemented lzma.Reader and tested it. + +### 2015-04-04 + +Implemented baseReader by adapting code from lzma.Reader. + +### 2015-04-03 + +The opCodec was copied to lzma2 yesterday. opCodec has a high +number of dependencies on other files in lzma2. Therefore I had to copy +almost all files from lzma. + +### 2015-03-31 + +Removed only a TODO item. + +However, Francesco Campoy's presentation "Go for Javaneros +(Javaïstes?)" presents the idea that by using an embedded field E, all the +methods of E will be defined on T. If E is an interface, T satisfies E. + +https://talks.golang.org/2014/go4java.slide#51 + +I have never used this, but it seems to be a cool idea. + +### 2015-03-30 + +Finished the type writerDict and wrote a simple test. + +### 2015-03-25 + +I started to implement the writerDict. + +### 2015-03-24 + +After thinking long about the LZMA2 code and several false starts, I +now have a plan to create a self-sufficient lzma2 package that supports +the classic LZMA format as well as LZMA2. The core idea is to support a +baseReader and baseWriter type that support the basic LZMA stream +without any headers. Both types must support the reuse of dictionaries +and the opCodec. + +### 2015-01-10 + +1. Implemented simple lzmago tool +2. Tested tool against large 4.4G file + - compression worked correctly; tested decompression with lzma + - decompression hits a full buffer condition +3. Fixed a bug in the compressor and wrote a test for it +4. Executed full cycle for 4.4 GB file; performance can be improved ;-) + +### 2015-01-11 + +- Release v0.2 because of the working LZMA encoder and decoder diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go new file mode 100644 index 000000000..fadc1a594 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -0,0 +1,74 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package xz + +import ( + "errors" + "io" +) + +// putUint32LE puts the little-endian representation of x into the first +// four bytes of p. +func putUint32LE(p []byte, x uint32) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) +} + +// putUint64LE puts the little-endian representation of x into the first +// eight bytes of p. +func putUint64LE(p []byte, x uint64) { + p[0] = byte(x) + p[1] = byte(x >> 8) + p[2] = byte(x >> 16) + p[3] = byte(x >> 24) + p[4] = byte(x >> 32) + p[5] = byte(x >> 40) + p[6] = byte(x >> 48) + p[7] = byte(x >> 56) +} + +// uint32LE converts a little endian representation to an uint32 value. +func uint32LE(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | + uint32(p[3])<<24 +} + +// putUvarint puts a uvarint representation of x into the byte slice. +func putUvarint(p []byte, x uint64) int { + i := 0 + for x >= 0x80 { + p[i] = byte(x) | 0x80 + x >>= 7 + i++ + } + p[i] = byte(x) + return i + 1 +} + +// errOverflow indicates an overflow of the 64-bit unsigned integer. +var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") + +// readUvarint reads a uvarint from the given byte reader. +func readUvarint(r io.ByteReader) (x uint64, n int, err error) { + var s uint + i := 0 + for { + b, err := r.ReadByte() + if err != nil { + return x, i, err + } + i++ + if b < 0x80 { + if i > 10 || i == 10 && b > 1 { + return x, i, errOverflowU64 + } + return x | uint64(b)< 0 { + k = 4 - k + } + return k +} + +/*** Header ***/ + +// headerMagic stores the magic bytes for the header +var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} + +// HeaderLen provides the length of the xz file header. +const HeaderLen = 12 + +// Constants for the checksum methods supported by xz. +const ( + CRC32 byte = 0x1 + CRC64 = 0x4 + SHA256 = 0xa +) + +// errInvalidFlags indicates that flags are invalid. +var errInvalidFlags = errors.New("xz: invalid flags") + +// verifyFlags returns the error errInvalidFlags if the value is +// invalid. +func verifyFlags(flags byte) error { + switch flags { + case CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. 
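The 12-byte stream header handled here can be checked from outside the package with the exported ValidHeader helper. Below is a minimal standalone sketch (assuming the vendored github.com/ulikunitz/xz package is importable) that builds a header by hand, following the same layout the UnmarshalBinary method just below expects:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"

	"github.com/ulikunitz/xz"
)

func main() {
	hdr := make([]byte, xz.HeaderLen) // 12 bytes
	// magic bytes: 0xfd '7' 'z' 'X' 'Z' 0x00
	copy(hdr, []byte{0xfd, '7', 'z', 'X', 'Z', 0x00})
	// stream flags: first byte must be zero, second selects the checksum
	hdr[6], hdr[7] = 0x00, xz.CRC64
	// CRC-32 over the two flag bytes, stored little-endian
	binary.LittleEndian.PutUint32(hdr[8:], crc32.ChecksumIEEE(hdr[6:8]))

	fmt.Println(xz.ValidHeader(hdr)) // expected: true
}
```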
+func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. 
+func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. 
See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. +func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. 
+func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. +func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
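An index record is simply two unsigned varints. The hypothetical indexRecord type and helpers below mirror record.MarshalBinary and readRecord above using the standard library's encoding/binary, which implements the same base-128 scheme as putUvarint/readUvarint in bits.go:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// indexRecord is a stand-in for the unexported record type above.
type indexRecord struct {
	unpaddedSize     int64
	uncompressedSize int64
}

// marshalRecord encodes both sizes as unsigned varints.
func marshalRecord(rec indexRecord) []byte {
	p := make([]byte, 20) // a uvarint needs at most 10 bytes
	n := binary.PutUvarint(p, uint64(rec.unpaddedSize))
	n += binary.PutUvarint(p[n:], uint64(rec.uncompressedSize))
	return p[:n]
}

// unmarshalRecord decodes the two varints again.
func unmarshalRecord(data []byte) (indexRecord, error) {
	r := bytes.NewReader(data)
	u, err := binary.ReadUvarint(r)
	if err != nil {
		return indexRecord{}, err
	}
	c, err := binary.ReadUvarint(r)
	if err != nil {
		return indexRecord{}, err
	}
	return indexRecord{int64(u), int64(c)}, nil
}

func main() {
	rec := indexRecord{unpaddedSize: 4096, uncompressedSize: 1 << 20}
	p := marshalRecord(rec)
	back, err := unmarshalRecord(p)
	fmt.Println(len(p), back, err) // 5 {4096 1048576} <nil>
}
```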
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 000000000..4b820bd5a Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox.xz differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 000000000..a32887872 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40, + 0x00710d647a51fe7a, 0x3c947692330aed60, + 0x9288aa280d355a7a, 0xa1806a9b791d1696, + 0x5d60e38496763da1, 0x6c69e22e613fd0f4, + 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, + 0x460c17992cbaece1, 0xf7822c5444d3297f, + 0x344a9790c69b74aa, 0xb80a42e6cae09dce, + 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, + 0x88e0b7be347627cc, 0x45246009b7a99490, + 0x8011c6dd3fe50472, 0xc341d682bffb99d7, + 0x2511be93808e2d15, 0xd5bc13d7fd739840, + 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, + 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, + 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, + 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, + 0xa559cce0d9199aac, 0xde39d47ef3723380, + 0xe5b69d848ce42e35, 0xefa24296f8e79f52, + 0x70190b59db9a5afc, 0x26f166cdb211e7bf, + 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, + 0xb9059b05e9420d90, 0x2f0da855c9388754, + 0x611d5e9ab77949cc, 0x2912038ac01163f4, + 0x0231df50402b2fba, 0x45660fc4f3245f58, + 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, + 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, + 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, + 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, + 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, + 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, + 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, + 0x6d1b3c1149dda943, 0x372c943a518c1093, + 0xad27af45e77c09c4, 0x3b6f92b646044604, + 0xac2917909f5fcf4f, 0x2069a60e977e5557, + 0x353a469e71014de5, 0x24be356281f55c15, + 0x2b6d710ba8e9adea, 0x404ad1751c749c29, + 0xed7311bf23d7f185, 0xba4f6976b4acc43e, + 0x32d7198d2bc39000, 0xee667019014d6e01, + 0x494ef3e128d14c83, 0x1f95a152baecd6be, + 0x201648dff1f483a5, 0x68c28550c8384af6, + 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, + 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, + 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, + 0xf8f6b97f5585080a, 0x74236084be57b95b, + 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, + 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go new file mode 100644 index 000000000..f99ec2206 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -0,0 +1,14 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package hash provides rolling hashes. + +Rolling hashes have to be used for maintaining the positions of n-byte +sequences in the dictionary buffer. + +The package provides currently the Rabin-Karp rolling hash and a Cyclic +Polynomial hash. Both support the Hashes method to be used with an interface. +*/ +package hash diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go new file mode 100644 index 000000000..58635b113 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -0,0 +1,66 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// A is the default constant for Robin-Karp rolling hash. This is a random +// prime. +const A = 0x97b548add41d5da1 + +// RabinKarp supports the computation of a rolling hash. +type RabinKarp struct { + A uint64 + // a^n + aOldest uint64 + h uint64 + p []byte + i int +} + +// NewRabinKarp creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The default constant will will be +// used. 
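The point of a rolling hash is that the hash of the next n-byte window follows from the previous one in constant time. The standalone sketch below re-derives the update rule used by RollByte further down and checks it against hashing each window from scratch; the roller type and helper names are illustrative stand-ins, since the real implementation lives under internal/ and cannot be imported from outside the module:

```go
package main

import "fmt"

// a is the package's default multiplier A.
const a = 0x97b548add41d5da1

// directHash hashes a window from scratch with the recurrence
// h = (h + byte) * a, the same recurrence RollByte uses.
func directHash(p []byte) uint64 {
	var h uint64
	for _, c := range p {
		h = (h + uint64(c)) * a
	}
	return h
}

// roller keeps the last n bytes and updates the hash incrementally.
type roller struct {
	aOldest uint64 // a^n, the weight of the byte leaving the window
	h       uint64
	p       []byte
	i       int
}

func newRoller(n int) *roller {
	aOldest := uint64(1)
	for i := 0; i < n; i++ {
		aOldest *= a
	}
	return &roller{aOldest: aOldest, p: make([]byte, 0, n)}
}

func (r *roller) rollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h = (r.h + uint64(x)) * a
		r.p = append(r.p, x)
		return r.h
	}
	r.h -= uint64(r.p[r.i]) * r.aOldest // drop the oldest byte
	r.h = (r.h + uint64(x)) * a         // add the new one
	r.p[r.i] = x
	r.i = (r.i + 1) % cap(r.p)
	return r.h
}

func main() {
	data := []byte("the quick brown fox jumps over the lazy dog")
	const n = 4
	r := newRoller(n)
	ok := true
	for i, c := range data {
		h := r.rollByte(c)
		if i >= n-1 && h != directHash(data[i-n+1:i+1]) {
			ok = false
		}
	}
	fmt.Println("rolling matches direct recomputation:", ok) // true
}
```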
+func NewRabinKarp(n int) *RabinKarp { + return NewRabinKarpConst(n, A) +} + +// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the +// length of the byte sequence to be hashed. The argument a provides the +// constant used to compute the hash. +func NewRabinKarpConst(n int, a uint64) *RabinKarp { + if n <= 0 { + panic("number of bytes n must be positive") + } + aOldest := uint64(1) + // There are faster methods. For the small n required by the LZMA + // compressor O(n) is sufficient. + for i := 0; i < n; i++ { + aOldest *= a + } + return &RabinKarp{ + A: a, aOldest: aOldest, + p: make([]byte, 0, n), + } +} + +// Len returns the length of the byte sequence. +func (r *RabinKarp) Len() int { + return cap(r.p) +} + +// RollByte computes the hash after x has been added. +func (r *RabinKarp) RollByte(x byte) uint64 { + if len(r.p) < cap(r.p) { + r.h += uint64(x) + r.h *= r.A + r.p = append(r.p, x) + } else { + r.h -= uint64(r.p[r.i]) * r.aOldest + r.h += uint64(x) + r.h *= r.A + r.p[r.i] = x + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go new file mode 100644 index 000000000..ab6a19ca4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -0,0 +1,29 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// Roller provides an interface for rolling hashes. The hash value will become +// valid after hash has been called Len times. +type Roller interface { + Len() int + RollByte(x byte) uint64 +} + +// Hashes computes all hash values for the array p. Note that the state of the +// roller is changed. +func Hashes(r Roller, p []byte) []uint64 { + n := r.Len() + if len(p) < n { + return nil + } + h := make([]uint64, len(p)-n+1) + for i := 0; i < n-1; i++ { + r.RollByte(p[i]) + } + for i := range h { + h[i] = r.RollByte(p[i+n-1]) + } + return h +} diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go new file mode 100644 index 000000000..0ba45e8ff --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -0,0 +1,457 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xlog provides a simple logging package that allows to disable +// certain message categories. It defines a type, Logger, with multiple +// methods for formatting output. The package has also a predefined +// 'standard' Logger accessible through helper function Print[f|ln], +// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] +// that are easier to use then creating a Logger manually. That logger +// writes to standard error and prints the date and time of each logged +// message, which can be configured using the function SetFlags. +// +// The Fatal functions call os.Exit(1) after the message is output +// unless not suppressed by the flags. The Panic functions call panic +// after the writing the log message unless suppressed. +package xlog + +import ( + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +// The flags define what information is prefixed to each log entry +// generated by the Logger. The Lno* versions allow the suppression of +// specific output. 
The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) + if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) 
+ } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) 
+ os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) 
+} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 000000000..a781bd195 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. 
+const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. 
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 000000000..e9bab0199 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
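The lookup table that follows can be reconstructed from ntz32Const alone: for a power of two 1<<i the product with the constant equals the constant shifted left by i, and its top five bits give a distinct table index. The standalone sketch below rebuilds such a table and cross-checks the trailing-zero count against math/bits:

```go
package main

import (
	"fmt"
	"math/bits"
)

// deBruijn repeats the value of ntz32Const above.
const deBruijn = 0x04d7651f

func main() {
	// Build the table: for x = 1<<i the index ((x*deBruijn)>>27) is i.
	var table [32]int
	for i := 0; i < 32; i++ {
		table[(uint32(deBruijn)<<uint(i))>>27] = i
	}

	// ntz mirrors ntz32: isolate the lowest set bit, multiply, look up.
	ntz := func(x uint32) int {
		if x == 0 {
			return 32
		}
		return table[((x&-x)*deBruijn)>>27]
	}

	ok := true
	for _, x := range []uint32{0, 1, 2, 3, 8, 0x80, 0x04000000, 0x80000000, 0xfffffff0} {
		if ntz(x) != bits.TrailingZeros32(x) {
			ok = false
		}
	}
	fmt.Println("de Bruijn ntz matches math/bits:", ok) // true
}
```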
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 000000000..5350d814f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 000000000..50e0b6d57 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. 
+func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. +func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. +func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 000000000..a3696ba08 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. The field N provides the number of remaining +// bytes. 
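Editor's note: the unexported buffer type in buffer.go above keeps one slot unused so that front == rear can mean "empty". A minimal standalone ring (my own sketch, not the vendored type) mirrors that index arithmetic and shows why a buffer created with size n can hold exactly n bytes even though the backing slice has n+1 elements.

package main

import "fmt"

// ring mirrors the index arithmetic of the vendored buffer type: front == rear
// means empty, so a full buffer holds len(data)-1 bytes.
type ring struct {
	data        []byte
	front, rear int
}

func newRing(size int) *ring { return &ring{data: make([]byte, size+1)} }

func (r *ring) buffered() int {
	d := r.front - r.rear
	if d < 0 {
		d += len(r.data)
	}
	return d
}

func (r *ring) available() int {
	d := r.rear - 1 - r.front
	if d < 0 {
		d += len(r.data)
	}
	return d
}

func (r *ring) writeByte(c byte) bool {
	if r.available() < 1 {
		return false
	}
	r.data[r.front] = c
	r.front++
	if r.front == len(r.data) {
		r.front = 0
	}
	return true
}

func main() {
	r := newRing(4) // capacity 4, backing slice of length 5
	for i := 0; i < 6; i++ {
		fmt.Println(i, r.writeByte(byte('a'+i)), r.buffered(), r.available())
	}
}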
+type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 000000000..16e14db39 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
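Editor's note: LimitedByteWriter in bytewriter.go above is exported, so a short usage sketch is possible. It assumes the package is imported by its canonical path github.com/ulikunitz/xz/lzma rather than through the vendor directory; the values are illustrative.

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer // *bytes.Buffer implements io.ByteWriter
	w := &lzma.LimitedByteWriter{BW: &buf, N: 3}

	// The first three writes succeed and decrement N; the fourth one
	// returns ErrLimit and leaves the underlying buffer untouched.
	for _, c := range []byte("abcd") {
		err := w.WriteByte(c)
		fmt.Printf("%c -> err=%v N=%d\n", c, err, w.N)
	}
	fmt.Println("buffered:", buf.String()) // "abc"
}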
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
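Editor's note: readOp above maintains the four most recent match distances in State.rep. The standalone sketch below (type and method names are mine) condenses that bookkeeping: a simple match pushes a new distance to the front and drops the oldest entry, while a rep match moves the reused entry to the front, exactly as the assignments in readOp do.

package main

import "fmt"

// rep models the recency list of the four most recent match distances.
type rep [4]uint32

// push mirrors the "simple match" case: new distance in front, oldest dropped.
func (r *rep) push(dist uint32) {
	r[3], r[2], r[1], r[0] = r[2], r[1], r[0], dist
}

// reuse mirrors the "rep match" cases g = 0..3: entry g moves to the front.
func (r *rep) reuse(g int) uint32 {
	dist := r[g]
	copy(r[1:g+1], r[0:g]) // shift entries 0..g-1 one slot back
	r[0] = dist
	return dist
}

func main() {
	r := rep{10, 20, 30, 40}
	r.push(5)
	fmt.Println(r) // [5 10 20 30]
	r.reuse(2)
	fmt.Println(r) // [20 5 10 30]
}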
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 000000000..564a12b83 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 000000000..e08eb989f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
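Editor's note: decoderDict.writeMatch earlier in decoderdict.go copies the match in chunks that never run past the current write position, which means a distance shorter than the match length replicates bytes the copy itself has just produced. The sketch below (copyMatch is my own helper on a plain slice, not the vendored code) shows that effect.

package main

import "fmt"

// copyMatch appends length bytes, each taken dist positions back from the end
// of the output, so an overlapping match keeps reading its own output.
func copyMatch(out []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		out = append(out, out[len(out)-dist])
	}
	return out
}

func main() {
	out := []byte("abc")
	out = copyMatch(out, 3, 5) // overlap: the source catches up with the output
	fmt.Println(string(out))   // "abcabcab"
	out = copyMatch(out, 1, 3) // distance 1 repeats the last byte
	fmt.Println(string(out))   // "abcabcabbbb"
}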
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 000000000..b053a2dce --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 000000000..fe1900a66 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
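Editor's note: the slot arithmetic in distCodec.Encode and Decode above can be checked in isolation. The sketch below (posSlot and slotBase are my names; math/bits.LeadingZeros32 stands in for the package-internal nlz32) maps a distance offset to its position slot and back to the smallest offset in that slot, matching the formulas in the two methods.

package main

import (
	"fmt"
	"math/bits"
)

// posSlot mirrors distCodec.Encode: offsets 0-3 map to themselves; larger
// offsets map to 2*(bitlength-1) plus the bit just below the most significant.
func posSlot(dist uint32) uint32 {
	if dist < 4 {
		return dist
	}
	b := uint32(30 - bits.LeadingZeros32(dist)) // same as 30 - nlz32(dist)
	return 2 + b<<1 + (dist>>b)&1
}

// slotBase mirrors distCodec.Decode: the smallest offset belonging to a slot.
func slotBase(slot uint32) uint32 {
	if slot < 4 {
		return slot
	}
	b := slot>>1 - 1
	return (2 | slot&1) << b
}

func main() {
	for _, d := range []uint32{0, 1, 3, 4, 5, 6, 7, 8, 11, 12, 1 << 20} {
		s := posSlot(d)
		fmt.Printf("offset %8d -> slot %2d (base %d)\n", d, s, slotBase(s))
	}
}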
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 000000000..9d0fbc703 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// addtional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. 
Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 000000000..5edad6332 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 000000000..d786a9745 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. 
+const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. +type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. 
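Editor's note: hashTableExponent above derives the hash-table size from the dictionary capacity and clamps it to the [minTableExponent, maxTableExponent] range. A small standalone check (tableExponent is my name) makes the clamping visible for a few capacities.

package main

import (
	"fmt"
	"math/bits"
)

// tableExponent mirrors hashTableExponent: take the capacity's bit length
// minus two and clamp the result to between 9 and 20 bits.
func tableExponent(capacity uint32) int {
	e := 30 - bits.LeadingZeros32(capacity) // 30 - nlz32(capacity)
	if e < 9 {
		e = 9
	}
	if e > 20 {
		e = 20
	}
	return e
}

func main() {
	for _, c := range []uint32{1 << 10, 1 << 16, 1 << 20, 1 << 26, 1 << 30} {
		e := tableExponent(c)
		fmt.Printf("dictCap %10d -> %2d bits (%d slots)\n", c, e, 1<<uint(e))
	}
}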
+func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. +func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. 
+ break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 000000000..bc708969f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. +func (h *header) unmarshalBinary(data []byte) error { + if len(data) != HeaderLen { + return errors.New("lzma.unmarshalBinary: data has wrong length") + } + + // properties + var err error + if h.properties, err = PropertiesForCode(data[0]); err != nil { + return err + } + + // dictionary capacity + h.dictCap = int(uint32LE(data[1:])) + if h.dictCap < 0 { + return errors.New( + "LZMA header: dictionary capacity exceeds maximum " + + "integer") + } + + // uncompressed size + s := uint64LE(data[5:]) + if s == noHeaderSize { + h.size = -1 + } else { + h.size = int64(s) + if h.size < 0 { + return errors.New( + "LZMA header: uncompressed size " + + "out of int64 range") + } + } + + return nil +} + +// validDictCap checks whether the dictionary capacity is correct. This +// is used to weed out wrong file headers. 
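Editor's note: header.marshalBinary above lays out the classic 13-byte LZMA file header. The sketch below (buildHeader is my helper; the values are illustrative) assembles the same layout with encoding/binary and reads the fields back, which may help when inspecting headers by hand.

package main

import (
	"encoding/binary"
	"fmt"
)

// buildHeader assembles a 13-byte classic LZMA header: one properties byte,
// the dictionary capacity as little-endian uint32, and the uncompressed size
// as little-endian uint64 (all ones when the size is unknown and an EOS
// marker terminates the stream, matching noHeaderSize above).
func buildHeader(props byte, dictCap uint32, size int64) []byte {
	h := make([]byte, 13)
	h[0] = props
	binary.LittleEndian.PutUint32(h[1:5], dictCap)
	s := ^uint64(0) // noHeaderSize: all ones marks an unknown length
	if size >= 0 {
		s = uint64(size)
	}
	binary.LittleEndian.PutUint64(h[5:], s)
	return h
}

func main() {
	// 0x5d is the common properties byte for lc=3, lp=0, pb=2.
	h := buildHeader(0x5d, 1<<23, -1)
	fmt.Printf("% x\n", h)
	fmt.Println("dictCap:", binary.LittleEndian.Uint32(h[1:5]))
	fmt.Println("size field:", binary.LittleEndian.Uint64(h[5:]))
}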
+func validDictCap(dictcap int) bool { + if int64(dictcap) == MaxDictCap { + return true + } + for n := uint(10); n < 32; n++ { + if dictcap == 1<= 10 or 2^32-1. If +// there is an explicit size it must not exceed 256 GiB. The length of +// the data argument must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 000000000..ac6a71a5a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. 
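Editor's note: in this rendering of the diff, the loop body of validDictCap above and part of the ValidHeader doc comment were swallowed between a '<' and the next '>'. The standalone sketch below reconstructs what the check most likely reads upstream, based on the surviving text: the capacity must be 2^32-1 (MaxDictCap) or have the form 2^n or 2^n + 2^(n-1) for n between 10 and 31. Treat it as a hedged reconstruction, not the vendored source.

package main

import "fmt"

// validDictCapSketch reconstructs the truncated validDictCap check.
func validDictCapSketch(dictcap int64) bool {
	if dictcap == 1<<32-1 {
		return true
	}
	for n := uint(10); n < 32; n++ {
		if dictcap == 1<<n {
			return true
		}
		if dictcap == 1<<n+1<<(n-1) {
			return true
		}
	}
	return false
}

func main() {
	for _, c := range []int64{1 << 20, 1<<20 + 1<<19, 1<<20 + 1, 1<<32 - 1} {
		fmt.Println(c, validDictCapSketch(c))
	}
}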
+func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
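Editor's note: chunkHeader.MarshalBinary above packs an LZMA2 chunk header into at most six bytes. The sketch below (packLRND is my helper, restricted to the LRND case) reproduces that byte layout directly so the bit packing is easier to see; the field values are illustrative, and note that the encoder elsewhere stores size fields minus one, which is outside the excerpt above.

package main

import "fmt"

// packLRND mirrors MarshalBinary for an LRND chunk: the header byte carries
// the chunk-type bits (1<<7|1<<6|1<<5) plus bits 16-20 of the uncompressed
// size field, followed by the low 16 bits of that field, the compressed size
// field, and the properties code, all big endian.
func packLRND(uncompressed uint32, compressed uint16, props byte) []byte {
	const hLRND = 1<<7 | 1<<6 | 1<<5
	data := make([]byte, 6)
	data[0] = hLRND | byte(uncompressed>>16)&^hLRND
	data[1] = byte(uncompressed >> 8)
	data[2] = byte(uncompressed)
	data[3] = byte(compressed >> 8)
	data[4] = byte(compressed)
	data[5] = props
	return data
}

func main() {
	fmt.Printf("% x\n", packLRND(0x12345, 0x00ff, 0x5d)) // e1 23 45 00 ff 5d
}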
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 000000000..e51773092 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,129 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. The value is used to select the tree codec +// for length encoding and decoding. 
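Editor's note: DecodeDictCap and EncodeDictCap above are exported, so a usage sketch is possible. It assumes the canonical import path github.com/ulikunitz/xz/lzma; the commented results follow from the decodeDictCap formula shown above, where the low bit of the code selects a mantissa of 2 or 3 and the remaining bits the shift.

package main

import (
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// Capacities grow as 4 KiB, 6 KiB, 8 KiB, 12 KiB, ...
	for c := byte(0); c < 6; c++ {
		n, err := lzma.DecodeDictCap(c)
		fmt.Printf("code %d -> %d bytes (err=%v)\n", c, n, err)
	}
	// EncodeDictCap returns the smallest code whose capacity covers n.
	fmt.Println(lzma.EncodeDictCap(1 << 20))     // 16: exactly 2<<19 bytes
	fmt.Println(lzma.EncodeDictCap(1<<20 + 1))   // 17: the next code up, 3<<19 bytes
}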
+const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// lBits gives the number of bits used for the encoding of the l value +// provided to the range encoder. +func lBits(l uint32) int { + switch { + case l < 8: + return 4 + case l < 16: + return 5 + default: + return 10 + } +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +// +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 000000000..c949d6ebd --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,132 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. 
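Editor's note: lengthCodec.Encode above routes the length offset l = length - minMatchLen into one of three trees, and lBits gives the resulting number of tree-symbol bits. The sketch below (lengthSlot is my name) just makes that routing explicit for a few actual match lengths.

package main

import "fmt"

// lengthSlot mirrors lengthCodec.Encode: the offset l = length - 2 goes to a
// 3-bit "low" tree (l < 8), a 3-bit "mid" tree (8 <= l < 16) or an 8-bit
// "high" tree, costing 4, 5 or 10 bits of tree symbols respectively.
func lengthSlot(length int) (slot string, bits int) {
	l := length - 2 // minMatchLen == 2
	switch {
	case l < 8:
		return "low", 4
	case l < 16:
		return "mid", 5
	default:
		return "high", 10
	}
}

func main() {
	for _, n := range []int{2, 9, 10, 17, 18, 273} {
		slot, bits := lengthSlot(n)
		fmt.Printf("length %3d -> %-4s tree, %2d bits\n", n, slot, bits)
	}
}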
+func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. +func (c *literalCodec) init(lc, lp int) { + switch { + case !(minLC <= lc && lc <= maxLC): + panic("lc out of range") + case !(minLP <= lp && lp <= maxLP): + panic("lp out of range") + } + c.probs = make([]prob, 0x300<= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + bit := (r >> 7) & 1 + r <<= 1 + i := ((1 + matchBit) << 8) | symbol + if err = probs[i].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit := (r >> 7) & 1 + r <<= 1 + if err = probs[symbol].Encode(e, bit); err != nil { + return + } + symbol = (symbol << 1) | bit + } + return nil +} + +// Decode decodes a literal byte using the range decoder as well as the LZMA +// state, a match byte, and the literal state. +func (c *literalCodec) Decode(d *rangeDecoder, + state uint32, match byte, litState uint32, +) (s byte, err error) { + k := litState * 0x300 + probs := c.probs[k : k+0x300] + symbol := uint32(1) + if state >= 7 { + m := uint32(match) + for { + matchBit := (m >> 7) & 1 + m <<= 1 + i := ((1 + matchBit) << 8) | symbol + bit, err := d.DecodeBit(&probs[i]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + if matchBit != bit { + break + } + if symbol >= 0x100 { + break + } + } + } + for symbol < 0x100 { + bit, err := d.DecodeBit(&probs[symbol]) + if err != nil { + return 0, err + } + symbol = (symbol << 1) | bit + } + s = byte(symbol - 0x100) + return s, nil +} + +// minLC and maxLC define the range for LC values. +const ( + minLC = 0 + maxLC = 8 +) + +// minLC and maxLC define the range for LP values. +const ( + minLP = 0 + maxLP = 4 +) + +// minState and maxState define a range for the state values stored in +// the State values. +const ( + minState = 0 + maxState = 11 +) diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go new file mode 100644 index 000000000..4a244eb1a --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -0,0 +1,52 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// MatchAlgorithm identifies an algorithm to find matches in the +// dictionary. +type MatchAlgorithm byte + +// Supported matcher algorithms. +const ( + HashTable4 MatchAlgorithm = iota + BinaryTree +) + +// maStrings are used by the String method. +var maStrings = map[MatchAlgorithm]string{ + HashTable4: "HashTable4", + BinaryTree: "BinaryTree", +} + +// String returns a string representation of the Matcher. +func (a MatchAlgorithm) String() string { + if s, ok := maStrings[a]; ok { + return s + } + return "unknown" +} + +var errUnsupportedMatchAlgorithm = errors.New( + "lzma: unsupported match algorithm value") + +// verify checks whether the matcher value is supported. 
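
Both matcher values defined above are exported, so callers can pick the match finder through the Matcher field of the writer configurations that appear later in this diff. A small illustrative sketch that only prints the supported values; note that HashTable4 is the zero value and therefore the default:

package main

import (
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// HashTable4 is the zero value and the default matcher; BinaryTree is
	// the alternative match finder.
	for _, m := range []lzma.MatchAlgorithm{lzma.HashTable4, lzma.BinaryTree} {
		fmt.Printf("%d: %s\n", byte(m), m)
	}
}
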
+func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 000000000..733bb99da --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,80 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// verify checks whether the match is valid. If that is not the case an +// error is returned. +func (m match) verify() error { + if !(minDistance <= m.distance && m.distance <= maxDistance) { + return errors.New("distance out of range") + } + if !(1 <= m.n && m.n <= maxMatchLen) { + return errors.New("length out of range") + } + return nil +} + +// l return the l-value for the match, which is the difference of length +// n and 2. +func (m match) l() uint32 { + return uint32(m.n - minMatchLen) +} + +// dist returns the dist value for the match, which is one less of the +// distance stored in the match. +func (m match) dist() uint32 { + return uint32(m.distance - minDistance) +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 000000000..24d50ec68 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. 
The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 000000000..23418e25d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 000000000..6361c5e7c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. 
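
The properties byte defined above multiplexes the three parameters as (PB*5+LP)*9+LC, so the value 0x5d commonly seen in classic .lzma headers decodes to LC=3, LP=0, PB=2. A minimal round-trip sketch using the exported API, with the import path as vendored in this diff:

package main

import (
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	p, err := lzma.PropertiesForCode(0x5d) // (2*5+0)*9 + 3 = 93 = 0x5d
	if err != nil {
		panic(err)
	}
	fmt.Println(&p)               // LC 3 LP 0 PB 2
	fmt.Println(p.Code() == 0x5d) // true
}
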
+type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
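
Ignoring normalization and the carry handling done by shiftLow above, one adaptive bit costs exactly the arithmetic below: the current range is split at bound = (range>>11)*p, the coder keeps the sub-interval for the bit it saw, and the probability moves 1/32 of the way towards the observed outcome. The following standalone model is illustrative only; it is not the package's rangeEncoder type:

package main

import "fmt"

const (
	probBits = 11
	moveBits = 5
)

// encodeBitStep performs one adaptive-bit step of a binary range coder.
// p is the probability of a 0 bit in units of 1/2048.
func encodeBitStep(low uint64, rng uint32, p uint16, bit uint32) (uint64, uint32, uint16) {
	bound := (rng >> probBits) * uint32(p)
	if bit == 0 {
		rng = bound                            // keep the lower sub-interval
		p += ((1 << probBits) - p) >> moveBits // a 0 becomes more likely
	} else {
		low += uint64(bound) // keep the upper sub-interval
		rng -= bound
		p -= p >> moveBits // a 1 becomes more likely
	}
	return low, rng, p
}

func main() {
	low, rng, p := uint64(0), uint32(0xffffffff), uint16(1<<(probBits-1))
	for _, b := range []uint32{0, 0, 1, 0} {
		low, rng, p = encodeBitStep(low, rng, p, b)
		fmt.Printf("bit %d: low=%#010x range=%#010x p=%d/2048\n", b, low, rng, p)
	}
}
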
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 000000000..2ef3dcaaa --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. 
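
A minimal decompression sketch for the classic format using the exported NewReader; the file name is only an example and error handling is reduced to panics:

package main

import (
	"bufio"
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// Decompress a classic .lzma file to stdout. Any io.Reader positioned
	// at an LZMA header works.
	f, err := os.Open("data.lzma")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r, err := lzma.NewReader(bufio.NewReader(f))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
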
+type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + return nil, errors.New("lzma: dictionary capacity too small") + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. +func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 000000000..a55cfaa4e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,232 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState + ctype chunkType +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. 
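
For raw LZMA2 chunk sequences (for example the payload that the xz package feeds through its LZMA2 filter) the analogous entry points are NewReader2 and Reader2Config.NewReader2. A small sketch with a non-default dictionary capacity; the use of stdin is arbitrary:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// Decode an LZMA2 chunk sequence from stdin with a 16 MiB dictionary
	// instead of the 8 MiB default.
	r2, err := lzma.Reader2Config{DictCap: 16 << 20}.NewReader2(os.Stdin)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r2); err != nil {
		panic(err)
	}
}
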
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 000000000..502351052 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. 
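
The twelve states 0..11 only record what kind of operations were emitted recently: 0..6 mean the last operation was a literal, 7..11 that it was a match, a repetition or a short repetition, and the state selects which probability contexts the coder uses next. The standalone helpers below mirror the updateState* transitions (including updateStateShortRep just below) for illustration only; they are not part of the package:

package main

import "fmt"

// afterLiteral, afterMatch and afterRep mirror the transitions of the
// 12-state LZMA state machine.
func afterLiteral(s uint32) uint32 {
	switch {
	case s < 4:
		return 0
	case s < 10:
		return s - 3
	default:
		return s - 6
	}
}

func afterMatch(s uint32) uint32 {
	if s < 7 {
		return 7
	}
	return 10
}

func afterRep(s uint32) uint32 {
	if s < 7 {
		return 8
	}
	return 11
}

func main() {
	s := uint32(0)
	steps := []struct {
		op string
		f  func(uint32) uint32
	}{
		{"match", afterMatch},
		{"literal", afterLiteral},
		{"rep", afterRep},
		{"literal", afterLiteral},
	}
	for _, step := range steps {
		s = step.f(s)
		fmt.Printf("%-7s -> state %d\n", step.op, s)
	}
}
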
+func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 000000000..504b3d78e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. 
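
Both tree codecs in this file store the probabilities of an n-bit value as an implicit binary tree of 1<<n entries: coding starts at node 1, each bit descends to child 2m or 2m+1, and after n steps the node index minus 1<<n is the decoded value. treeCodec walks the value MSB first, treeReverseCodec LSB first. An illustrative sketch (not part of the package) of the node indices touched by the MSB-first walk:

package main

import "fmt"

// treePath returns the probability indices visited when an n-bit value is
// coded MSB first in an implicit binary tree of 1<<bits probabilities.
func treePath(v uint32, bits int) []uint32 {
	m := uint32(1)
	var path []uint32
	for i := bits - 1; i >= 0; i-- {
		path = append(path, m)
		m = (m << 1) | ((v >> uint(i)) & 1)
	}
	// m is now (1<<bits)|v, which is why Decode returns m - (1 << bits).
	return path
}

func main() {
	fmt.Println(treePath(5, 3)) // [1 3 6]
}
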
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := uint(0); j < uint(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + v |= b << j + } + return v, nil +} + +// probTree stores enough probability values to be used by the treeEncode and +// treeDecode methods of the range coder types. +type probTree struct { + probs []prob + bits byte +} + +// deepcopy initializes the probTree value as a deep copy of the source. +func (t *probTree) deepcopy(src *probTree) { + if t == src { + return + } + t.probs = make([]prob, len(src.probs)) + copy(t.probs, src.probs) + t.bits = src.bits +} + +// makeProbTree initializes a probTree structure. +func makeProbTree(bits int) probTree { + if !(1 <= bits && bits <= 32) { + panic("bits outside of range [1,32]") + } + t := probTree{ + bits: byte(bits), + probs: make([]prob, 1< 0 { + c.SizeInHeader = true + } + if !c.SizeInHeader { + c.EOSMarker = true + } +} + +// Verify checks WriterConfig for errors. Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. 
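
A minimal compression sketch for the classic format: with the zero WriterConfig no uncompressed size is stored in the header, so the stream is terminated by the end-of-stream marker instead. Stdin/stdout are used only as example endpoints:

package main

import (
	"bufio"
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	out := bufio.NewWriter(os.Stdout)
	w, err := lzma.NewWriter(out)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(w, os.Stdin); err != nil {
		panic(err)
	}
	// Close finishes the LZMA stream; the bufio.Writer still has to be
	// flushed separately.
	if err := w.Close(); err != nil {
		panic(err)
	}
	if err := out.Flush(); err != nil {
		panic(err)
	}
}
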
+func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. +func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 000000000..7c1afe157 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. 
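
A minimal sketch for producing a raw LZMA2 chunk sequence with Writer2. Per the documentation above, Close appends the end-of-stream chunk, while output that has only been flushed can still be extended or concatenated:

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer
	w2, err := lzma.NewWriter2(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w2.Write([]byte("hello, lzma2")); err != nil {
		panic(err)
	}
	// Flush would only write out the buffered chunks; Close additionally
	// appends the single-byte end-of-stream chunk.
	if err := w2.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of LZMA2 chunks\n", buf.Len())
}
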
+type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. +func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. 
+func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. The encoder will be reset +// to support the next chunk. +func (w *Writer2) flushChunk() error { + if w.written() == 0 { + return nil + } + var err error + if err = w.encoder.Close(); err != nil { + return err + } + if err = w.writeChunk(); err != nil { + return err + } + w.buf.Reset() + w.lbw.N = maxCompressed + if err = w.encoder.Reopen(&w.lbw); err != nil { + return err + } + if err = w.cstate.next(w.ctype); err != nil { + return err + } + w.ctype = w.cstate.defaultChunkType() + w.start = cloneState(w.encoder.state) + return nil +} + +// Flush writes all buffered data out to the underlying stream. This +// could result in multiple chunks to be created. +func (w *Writer2) Flush() error { + if w.cstate == stop { + return errClosed + } + for w.written() > 0 { + if err := w.flushChunk(); err != nil { + return err + } + } + return nil +} + +// Close terminates the LZMA2 stream with an EOS chunk. +func (w *Writer2) Close() error { + if w.cstate == stop { + return errClosed + } + if err := w.Flush(); err != nil { + return nil + } + // write zero byte EOS chunk + _, err := w.w.Write([]byte{0}) + if err != nil { + return err + } + w.cstate = stop + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go new file mode 100644 index 000000000..69cf5f7c2 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -0,0 +1,117 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "fmt" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// LZMA filter constants. +const ( + lzmaFilterID = 0x21 + lzmaFilterLen = 3 +) + +// lzmaFilter declares the LZMA2 filter information stored in an xz +// block header. +type lzmaFilter struct { + dictCap int64 +} + +// String returns a representation of the LZMA filter. +func (f lzmaFilter) String() string { + return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) +} + +// id returns the ID for the LZMA2 filter. +func (f lzmaFilter) id() uint64 { return lzmaFilterID } + +// MarshalBinary converts the lzmaFilter in its encoded representation. 
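
As the MarshalBinary implementation that follows shows, the LZMA2 filter occupies exactly three bytes in an xz block header: the filter ID 0x21, the one-byte size of the filter properties, and the encoded dictionary capacity. A sketch building the same record by hand with the exported EncodeDictCap (8 MiB encodes to code 0x16):

package main

import (
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	// Three-byte LZMA2 filter record for an 8 MiB dictionary: filter ID,
	// properties size, dictionary-capacity code.
	record := []byte{0x21, 1, lzma.EncodeDictCap(8 << 20)}
	fmt.Printf("% x\n", record) // 21 01 16
}
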
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. +func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100644 index 000000000..a8c612ce1 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 000000000..0634c6bcc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. 
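
At the package level, decompressing an .xz stream is a thin wrapper around NewReader: the defaults handle concatenated streams and stream padding, while ReaderConfig.SingleStream restricts the reader to exactly one stream. A minimal sketch:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	// Decompress an .xz stream from stdin to stdout.
	r, err := xz.NewReader(os.Stdin)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
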
+func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
+func (br *blockReader) Read(p []byte) (n int, err error) { + n, err = br.r.Read(p) + br.n += int64(n) + + u := br.header.uncompressedSize + if u >= 0 && br.uncompressedSize() > u { + return n, errors.New("xz: wrong uncompressed size for block") + } + c := br.header.compressedSize + if c >= 0 && br.compressedSize() > c { + return n, errors.New("xz: wrong compressed size for block") + } + if err != io.EOF { + return n, err + } + if br.uncompressedSize() < u || br.compressedSize() < c { + return n, io.ErrUnexpectedEOF + } + + s := br.hash.Size() + k := padLen(br.lxz.n) + q := make([]byte, k+s, k+2*s) + if _, err = io.ReadFull(br.lxz.r, q); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err + } + if !allZeros(q[:k]) { + return n, errors.New("xz: non-zero block padding") + } + checkSum := q[k:] + computedSum := br.hash.Sum(checkSum[s:]) + if !bytes.Equal(checkSum, computedSum) { + return n, errors.New("xz: checksum error for block") + } + return n, io.EOF +} + +func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, + err error) { + + if err = verifyFilters(f); err != nil { + return nil, err + } + + fr = r + for i := len(f) - 1; i >= 0; i-- { + fr, err = f[i].reader(fr, c) + if err != nil { + return nil, err + } + } + return fr, nil +} diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go new file mode 100644 index 000000000..c126f7099 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -0,0 +1,386 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import ( + "errors" + "hash" + "io" + + "github.com/ulikunitz/xz/lzma" +) + +// WriterConfig describe the parameters for an xz writer. +type WriterConfig struct { + Properties *lzma.Properties + DictCap int + BufSize int + BlockSize int64 + // checksum method: CRC32, CRC64 or SHA256 + CheckSum byte + // match algorithm + Matcher lzma.MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *WriterConfig) fill() { + if c.Properties == nil { + c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } + if c.BlockSize == 0 { + c.BlockSize = maxInt64 + } + if c.CheckSum == 0 { + c.CheckSum = CRC64 + } +} + +// Verify checks the configuration for errors. Zero values will be +// replaced by default values. +func (c *WriterConfig) Verify() error { + if c == nil { + return errors.New("xz: writer configuration is nil") + } + c.fill() + lc := lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + if err := lc.Verify(); err != nil { + return err + } + if c.BlockSize <= 0 { + return errors.New("xz: block size out of range") + } + if err := verifyFlags(c.CheckSum); err != nil { + return err + } + return nil +} + +// filters creates the filter list for the given parameters. +func (c *WriterConfig) filters() []filter { + return []filter{&lzmaFilter{int64(c.DictCap)}} +} + +// maxInt64 defines the maximum 64-bit signed integer. +const maxInt64 = 1<<63 - 1 + +// verifyFilters checks the filter list for the length and the right +// sequence of filters. 
+func verifyFilters(f []filter) error { + if len(f) == 0 { + return errors.New("xz: no filters") + } + if len(f) > 4 { + return errors.New("xz: more than four filters") + } + for _, g := range f[:len(f)-1] { + if g.last() { + return errors.New("xz: last filter is not last") + } + } + if !f[len(f)-1].last() { + return errors.New("xz: wrong last filter") + } + return nil +} + +// newFilterWriteCloser converts a filter list into a WriteCloser that +// can be used by a blockWriter. +func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { + if err = verifyFilters(f); err != nil { + return nil, err + } + fw = nopWriteCloser(w) + for i := len(f) - 1; i >= 0; i-- { + fw, err = f[i].writeCloser(fw, c) + if err != nil { + return nil, err + } + } + return fw, nil +} + +// nopWCloser implements a WriteCloser with a Close method not doing +// anything. +type nopWCloser struct { + io.Writer +} + +// Close returns nil and doesn't do anything else. +func (c nopWCloser) Close() error { + return nil +} + +// nopWriteCloser converts the Writer into a WriteCloser with a Close +// function that does nothing beside returning nil. +func nopWriteCloser(w io.Writer) io.WriteCloser { + return nopWCloser{w} +} + +// Writer compresses data written to it. It is an io.WriteCloser. +type Writer struct { + WriterConfig + + xz io.Writer + bw *blockWriter + newHash func() hash.Hash + h header + index []record + closed bool +} + +// newBlockWriter creates a new block writer writes the header out. +func (w *Writer) newBlockWriter() error { + var err error + w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) + if err != nil { + return err + } + if err = w.bw.writeHeader(w.xz); err != nil { + return err + } + return nil +} + +// closeBlockWriter closes a block writer and records the sizes in the +// index. +func (w *Writer) closeBlockWriter() error { + var err error + if err = w.bw.Close(); err != nil { + return err + } + w.index = append(w.index, w.bw.record()) + return nil +} + +// NewWriter creates a new xz writer using default parameters. +func NewWriter(xz io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(xz) +} + +// NewWriter creates a new Writer using the given configuration parameters. +func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{ + WriterConfig: c, + xz: xz, + h: header{c.CheckSum}, + index: make([]record, 0, 4), + } + if w.newHash, err = newHashFunc(c.CheckSum); err != nil { + return nil, err + } + data, err := w.h.MarshalBinary() + if _, err = xz.Write(data); err != nil { + return nil, err + } + if err = w.newBlockWriter(); err != nil { + return nil, err + } + return w, nil + +} + +// Write compresses the uncompressed data provided. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, errClosed + } + for { + k, err := w.bw.Write(p[n:]) + n += k + if err != errNoSpace { + return n, err + } + if err = w.closeBlockWriter(); err != nil { + return n, err + } + if err = w.newBlockWriter(); err != nil { + return n, err + } + } +} + +// Close closes the writer and adds the footer to the Writer. Close +// doesn't close the underlying writer. 
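+// It finishes the current block, writes the index for all blocks written so
+// far and appends the stream footer.
+//
+// A minimal usage sketch from client code (illustrative only; out is assumed
+// to be an io.Writer, data a []byte, and errors are ignored for brevity):
+//
+//    w, _ := xz.NewWriter(out)
+//    _, _ = w.Write(data)
+//    _ = w.Close()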
+func (w *Writer) Close() error { + if w.closed { + return errClosed + } + w.closed = true + var err error + if err = w.closeBlockWriter(); err != nil { + return err + } + + f := footer{flags: w.h.flags} + if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { + return err + } + data, err := f.MarshalBinary() + if err != nil { + return err + } + if _, err = w.xz.Write(data); err != nil { + return err + } + return nil +} + +// countingWriter is a writer that counts all data written to it. +type countingWriter struct { + w io.Writer + n int64 +} + +// Write writes data to the countingWriter. +func (cw *countingWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + if err == nil && cw.n < 0 { + return n, errors.New("xz: counter overflow") + } + return +} + +// blockWriter is writes a single block. +type blockWriter struct { + cxz countingWriter + // mw combines io.WriteCloser w and the hash. + mw io.Writer + w io.WriteCloser + n int64 + blockSize int64 + closed bool + headerLen int + + filters []filter + hash hash.Hash +} + +// newBlockWriter creates a new block writer. +func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { + bw = &blockWriter{ + cxz: countingWriter{w: xz}, + blockSize: c.BlockSize, + filters: c.filters(), + hash: hash, + } + bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) + if err != nil { + return nil, err + } + bw.mw = io.MultiWriter(bw.w, bw.hash) + return bw, nil +} + +// writeHeader writes the header. If the function is called after Close +// the commpressedSize and uncompressedSize fields will be filled. +func (bw *blockWriter) writeHeader(w io.Writer) error { + h := blockHeader{ + compressedSize: -1, + uncompressedSize: -1, + filters: bw.filters, + } + if bw.closed { + h.compressedSize = bw.compressedSize() + h.uncompressedSize = bw.uncompressedSize() + } + data, err := h.MarshalBinary() + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + bw.headerLen = len(data) + return nil +} + +// compressed size returns the amount of data written to the underlying +// stream. +func (bw *blockWriter) compressedSize() int64 { + return bw.cxz.n +} + +// uncompressedSize returns the number of data written to the +// blockWriter +func (bw *blockWriter) uncompressedSize() int64 { + return bw.n +} + +// unpaddedSize returns the sum of the header length, the uncompressed +// size of the block and the hash size. +func (bw *blockWriter) unpaddedSize() int64 { + if bw.headerLen <= 0 { + panic("xz: block header not written") + } + n := int64(bw.headerLen) + n += bw.compressedSize() + n += int64(bw.hash.Size()) + return n +} + +// record returns the record for the current stream. Call Close before +// calling this method. +func (bw *blockWriter) record() record { + return record{bw.unpaddedSize(), bw.uncompressedSize()} +} + +var errClosed = errors.New("xz: writer already closed") + +var errNoSpace = errors.New("xz: no space") + +// Write writes uncompressed data to the block writer. +func (bw *blockWriter) Write(p []byte) (n int, err error) { + if bw.closed { + return 0, errClosed + } + + t := bw.blockSize - bw.n + if int64(len(p)) > t { + err = errNoSpace + p = p[:t] + } + + var werr error + n, werr = bw.mw.Write(p) + bw.n += int64(n) + if werr != nil { + return n, werr + } + return n, err +} + +// Close closes the writer. 
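+// It flushes the filter chain, writes the block padding and appends the
+// check sum computed over the uncompressed data.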
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/.travis.yml b/vendor/github.com/vmihailenco/msgpack/.travis.yml new file mode 100644 index 000000000..0c2f74ed4 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get gopkg.in/check.v1 diff --git a/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md new file mode 100644 index 000000000..9a4f38a93 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/CHANGELOG.md @@ -0,0 +1,24 @@ +## 3.4 + +- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and DecodeMulti are added as replacement. +- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. +- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. + +## v3.3 + +- `msgpack:",inline"` tag is restored to force inlining structs. + +## v3.2 + +- Decoding extension types returns pointer to the value instead of the value. Fixes #153 + +## v3 + +- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. +- Msgpack maps are decoded into map[string]interface{} by default. +- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. +- Embedded structs are automatically inlined where possible. +- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. +- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. +- DecodeInterface can now return int8/16/32 and uint8/16/32. +- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/LICENSE b/vendor/github.com/vmihailenco/msgpack/LICENSE new file mode 100644 index 000000000..b749d0707 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/Makefile b/vendor/github.com/vmihailenco/msgpack/Makefile new file mode 100644 index 000000000..b62ae6a46 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/Makefile @@ -0,0 +1,5 @@ +all: + go test ./... + env GOOS=linux GOARCH=386 go test ./... + go test ./... -short -race + go vet diff --git a/vendor/github.com/vmihailenco/msgpack/README.md b/vendor/github.com/vmihailenco/msgpack/README.md new file mode 100644 index 000000000..0c75ae16e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/README.md @@ -0,0 +1,69 @@ +# MessagePack encoding for Golang + +[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) +[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) + +Supports: +- Primitives, arrays, maps, structs, time.Time and interface{}. +- Appengine *datastore.Key and datastore.Cursor. +- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. +- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. +- Renaming fields via `msgpack:"my_field_name"`. +- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). +- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). +- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.StructAsArray) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). +- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. +- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). + +API docs: https://godoc.org/github.com/vmihailenco/msgpack. +Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. 
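+
+The tag options from the list above look like this in practice (illustrative
+only; the type and field names below are invented):
+
+```go
+type Item struct {
+    ID    int    `msgpack:"id"`
+    Notes string `msgpack:",omitempty"`
+}
+```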
+ +## Installation + +Install: + +```shell +go get -u github.com/vmihailenco/msgpack +``` + +## Quickstart + +```go +func ExampleMarshal() { + type Item struct { + Foo string + } + + b, err := msgpack.Marshal(&Item{Foo: "bar"}) + if err != nil { + panic(err) + } + + var item Item + err = msgpack.Unmarshal(b, &item) + if err != nil { + panic(err) + } + fmt.Println(item.Foo) + // Output: bar +} +``` + +## Benchmark + +``` +BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op +BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op +BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op +BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op +BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/vendor/github.com/vmihailenco/msgpack/appengine.go b/vendor/github.com/vmihailenco/msgpack/appengine.go new file mode 100644 index 000000000..e8e91e53f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/appengine.go @@ -0,0 +1,64 @@ +// +build appengine + +package msgpack + +import ( + "reflect" + + ds "google.golang.org/appengine/datastore" +) + +func init() { + Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) + Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) +} + +func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { + if key == nil { + return e.EncodeNil() + } + return e.EncodeString(key.Encode()) +} + +func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { + key := v.Interface().(*ds.Key) + return EncodeDatastoreKey(e, key) +} + +func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { + v, err := d.DecodeString() + if err != nil { + return nil, err + } + if v == "" { + return nil, nil + } + return ds.DecodeKey(v) +} + +func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { + key, err := DecodeDatastoreKey(d) + if err != nil { + return err + } + v.Set(reflect.ValueOf(key)) + return nil +} + +func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { + cursor := v.Interface().(ds.Cursor) + return e.Encode(cursor.String()) +} + +func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + cursor, err := ds.DecodeCursor(s) + if err != nil { + return err + } + v.Set(reflect.ValueOf(cursor)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/codes/codes.go new file mode 100644 index 000000000..906af376e --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/codes/codes.go @@ -0,0 +1,86 @@ +package codes + +type Code byte + +var ( + PosFixedNumHigh Code = 0x7f + NegFixedNumLow Code = 0xe0 + + Nil Code = 0xc0 + + False Code = 0xc2 + True Code = 0xc3 + + Float Code = 0xca + Double Code = 0xcb + + Uint8 Code = 0xcc + Uint16 Code = 0xcd + Uint32 Code = 0xce + Uint64 Code = 0xcf + + Int8 Code = 0xd0 + Int16 Code = 0xd1 + Int32 Code = 0xd2 + Int64 Code = 0xd3 + + FixedStrLow Code = 0xa0 + FixedStrHigh Code = 0xbf + FixedStrMask Code = 0x1f + Str8 Code = 0xd9 + Str16 Code = 0xda + Str32 Code = 0xdb + + Bin8 Code = 0xc4 + Bin16 Code = 0xc5 + Bin32 Code = 0xc6 + + FixedArrayLow Code 
= 0x90 + FixedArrayHigh Code = 0x9f + FixedArrayMask Code = 0xf + Array16 Code = 0xdc + Array32 Code = 0xdd + + FixedMapLow Code = 0x80 + FixedMapHigh Code = 0x8f + FixedMapMask Code = 0xf + Map16 Code = 0xde + Map32 Code = 0xdf + + FixExt1 Code = 0xd4 + FixExt2 Code = 0xd5 + FixExt4 Code = 0xd6 + FixExt8 Code = 0xd7 + FixExt16 Code = 0xd8 + Ext8 Code = 0xc7 + Ext16 Code = 0xc8 + Ext32 Code = 0xc9 +) + +func IsFixedNum(c Code) bool { + return c <= PosFixedNumHigh || c >= NegFixedNumLow +} + +func IsFixedMap(c Code) bool { + return c >= FixedMapLow && c <= FixedMapHigh +} + +func IsFixedArray(c Code) bool { + return c >= FixedArrayLow && c <= FixedArrayHigh +} + +func IsFixedString(c Code) bool { + return c >= FixedStrLow && c <= FixedStrHigh +} + +func IsString(c Code) bool { + return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 +} + +func IsFixedExt(c Code) bool { + return c >= FixExt1 && c <= FixExt16 +} + +func IsExt(c Code) bool { + return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode.go b/vendor/github.com/vmihailenco/msgpack/decode.go new file mode 100644 index 000000000..741410040 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode.go @@ -0,0 +1,546 @@ +package msgpack + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +const bytesAllocLimit = 1024 * 1024 // 1mb + +type bufReader interface { + io.Reader + io.ByteScanner +} + +func newBufReader(r io.Reader) bufReader { + if br, ok := r.(bufReader); ok { + return br + } + return bufio.NewReader(r) +} + +func makeBuffer() []byte { + return make([]byte, 0, 64) +} + +// Unmarshal decodes the MessagePack-encoded data and stores the result +// in the value pointed to by v. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +type Decoder struct { + r io.Reader + s io.ByteScanner + buf []byte + + extLen int + rec []byte // accumulates read data if not nil + + useLoose bool + useJSONTag bool + + decodeMapFunc func(*Decoder) (interface{}, error) +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r +// beyond the MessagePack values requested. Buffering can be disabled +// by passing a reader that implements io.ByteScanner interface. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + buf: makeBuffer(), + } + d.resetReader(r) + return d +} + +func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { + d.decodeMapFunc = fn +} + +// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose +// to decode msgpack value into Go interface{}. +func (d *Decoder) UseDecodeInterfaceLoose(flag bool) { + d.useLoose = flag +} + +// UseJSONTag causes the Decoder to use json struct tag as fallback option +// if there is no msgpack tag. 
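+// Enabling it on both the Encoder and the Decoder lets msgpack reuse structs
+// that only carry encoding/json tags.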
+func (d *Decoder) UseJSONTag(v bool) *Decoder { + d.useJSONTag = v + return d +} + +func (d *Decoder) Reset(r io.Reader) error { + d.resetReader(r) + return nil +} + +func (d *Decoder) resetReader(r io.Reader) { + reader := newBufReader(r) + d.r = reader + d.s = reader +} + +func (d *Decoder) Decode(v interface{}) error { + var err error + switch v := v.(type) { + case *string: + if v != nil { + *v, err = d.DecodeString() + return err + } + case *[]byte: + if v != nil { + return d.decodeBytesPtr(v) + } + case *int: + if v != nil { + *v, err = d.DecodeInt() + return err + } + case *int8: + if v != nil { + *v, err = d.DecodeInt8() + return err + } + case *int16: + if v != nil { + *v, err = d.DecodeInt16() + return err + } + case *int32: + if v != nil { + *v, err = d.DecodeInt32() + return err + } + case *int64: + if v != nil { + *v, err = d.DecodeInt64() + return err + } + case *uint: + if v != nil { + *v, err = d.DecodeUint() + return err + } + case *uint8: + if v != nil { + *v, err = d.DecodeUint8() + return err + } + case *uint16: + if v != nil { + *v, err = d.DecodeUint16() + return err + } + case *uint32: + if v != nil { + *v, err = d.DecodeUint32() + return err + } + case *uint64: + if v != nil { + *v, err = d.DecodeUint64() + return err + } + case *bool: + if v != nil { + *v, err = d.DecodeBool() + return err + } + case *float32: + if v != nil { + *v, err = d.DecodeFloat32() + return err + } + case *float64: + if v != nil { + *v, err = d.DecodeFloat64() + return err + } + case *[]string: + return d.decodeStringSlicePtr(v) + case *map[string]string: + return d.decodeMapStringStringPtr(v) + case *map[string]interface{}: + return d.decodeMapStringInterfacePtr(v) + case *time.Duration: + if v != nil { + vv, err := d.DecodeInt64() + *v = time.Duration(vv) + return err + } + case *time.Time: + if v != nil { + *v, err = d.DecodeTime() + return err + } + } + + vv := reflect.ValueOf(v) + if !vv.IsValid() { + return errors.New("msgpack: Decode(nil)") + } + if vv.Kind() != reflect.Ptr { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + vv = vv.Elem() + if !vv.IsValid() { + return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) + } + return d.DecodeValue(vv) +} + +func (d *Decoder) DecodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := d.Decode(vv); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeInterfaceCond() (interface{}, error) { + if d.useLoose { + return d.DecodeInterfaceLoose() + } + return d.DecodeInterface() +} + +func (d *Decoder) DecodeValue(v reflect.Value) error { + decode := getDecoder(v.Type()) + return decode(d, v) +} + +func (d *Decoder) DecodeNil() error { + c, err := d.readCode() + if err != nil { + return err + } + if c != codes.Nil { + return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) + } + return nil +} + +func (d *Decoder) decodeNilValue(v reflect.Value) error { + err := d.DecodeNil() + if v.IsNil() { + return err + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + v.Set(reflect.Zero(v.Type())) + return err +} + +func (d *Decoder) DecodeBool() (bool, error) { + c, err := d.readCode() + if err != nil { + return false, err + } + return d.bool(c) +} + +func (d *Decoder) bool(c codes.Code) (bool, error) { + if c == codes.False { + return false, nil + } + if c == codes.True { + return true, nil + } + return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) +} + +// DecodeInterface decodes value into interface. 
It returns following types: +// - nil, +// - bool, +// - int8, int16, int32, int64, +// - uint8, uint16, uint32, uint64, +// - float32 and float64, +// - string, +// - []byte, +// - slices of any of the above, +// - maps of any of the above. +// +// DecodeInterface should be used only when you don't know the type of value +// you are decoding. For example, if you are decoding number it is better to use +// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. +func (d *Decoder) DecodeInterface() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int8(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float: + return d.float32(c) + case codes.Double: + return d.float64(c) + case codes.Uint8: + return d.uint8() + case codes.Uint16: + return d.uint16() + case codes.Uint32: + return d.uint32() + case codes.Uint64: + return d.uint64() + case codes.Int8: + return d.int8() + case codes.Int16: + return d.int16() + case codes.Int32: + return d.int32() + case codes.Int64: + return d.int64() + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// DecodeInterfaceLoose is like DecodeInterface except that: +// - int8, int16, and int32 are converted to int64, +// - uint8, uint16, and uint32 are converted to uint64, +// - float32 is converted to float64. +func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + + if codes.IsFixedNum(c) { + return int64(c), nil + } + if codes.IsFixedMap(c) { + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + } + if codes.IsFixedArray(c) { + return d.decodeSlice(c) + } + if codes.IsFixedString(c) { + return d.string(c) + } + + switch c { + case codes.Nil: + return nil, nil + case codes.False, codes.True: + return d.bool(c) + case codes.Float, codes.Double: + return d.float64(c) + case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: + return d.uint(c) + case codes.Int8, codes.Int16, codes.Int32, codes.Int64: + return d.int(c) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.bytes(c, nil) + case codes.Str8, codes.Str16, codes.Str32: + return d.string(c) + case codes.Array16, codes.Array32: + return d.decodeSlice(c) + case codes.Map16, codes.Map32: + err = d.s.UnreadByte() + if err != nil { + return nil, err + } + return d.DecodeMap() + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.extInterface(c) + } + + return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) +} + +// Skip skips next value. 
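+// It handles every MessagePack type, including nested maps, arrays and
+// extensions, and is also used to ignore unknown struct fields while
+// decoding.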
+func (d *Decoder) Skip() error { + c, err := d.readCode() + if err != nil { + return err + } + + if codes.IsFixedNum(c) { + return nil + } else if codes.IsFixedMap(c) { + return d.skipMap(c) + } else if codes.IsFixedArray(c) { + return d.skipSlice(c) + } else if codes.IsFixedString(c) { + return d.skipBytes(c) + } + + switch c { + case codes.Nil, codes.False, codes.True: + return nil + case codes.Uint8, codes.Int8: + return d.skipN(1) + case codes.Uint16, codes.Int16: + return d.skipN(2) + case codes.Uint32, codes.Int32, codes.Float: + return d.skipN(4) + case codes.Uint64, codes.Int64, codes.Double: + return d.skipN(8) + case codes.Bin8, codes.Bin16, codes.Bin32: + return d.skipBytes(c) + case codes.Str8, codes.Str16, codes.Str32: + return d.skipBytes(c) + case codes.Array16, codes.Array32: + return d.skipSlice(c) + case codes.Map16, codes.Map32: + return d.skipMap(c) + case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, + codes.Ext8, codes.Ext16, codes.Ext32: + return d.skipExt(c) + } + + return fmt.Errorf("msgpack: unknown code %x", c) +} + +// PeekCode returns the next MessagePack code without advancing the reader. +// Subpackage msgpack/codes contains list of available codes. +func (d *Decoder) PeekCode() (codes.Code, error) { + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + return codes.Code(c), d.s.UnreadByte() +} + +func (d *Decoder) hasNilCode() bool { + code, err := d.PeekCode() + return err == nil && code == codes.Nil +} + +func (d *Decoder) readCode() (codes.Code, error) { + d.extLen = 0 + c, err := d.s.ReadByte() + if err != nil { + return 0, err + } + if d.rec != nil { + d.rec = append(d.rec, c) + } + return codes.Code(c), nil +} + +func (d *Decoder) readFull(b []byte) error { + _, err := io.ReadFull(d.r, b) + if err != nil { + return err + } + if d.rec != nil { + d.rec = append(d.rec, b...) + } + return nil +} + +func (d *Decoder) readN(n int) ([]byte, error) { + buf, err := readN(d.r, d.buf, n) + if err != nil { + return nil, err + } + d.buf = buf + if d.rec != nil { + d.rec = append(d.rec, buf...) + } + return buf, nil +} + +func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + if n <= bytesAllocLimit { + b = make([]byte, n) + } else { + b = make([]byte, bytesAllocLimit) + } + } + + if n <= cap(b) { + b = b[:n] + _, err := io.ReadFull(r, b) + return b, err + } + b = b[:cap(b)] + + var pos int + for { + alloc := n - len(b) + if alloc > bytesAllocLimit { + alloc = bytesAllocLimit + } + b = append(b, make([]byte, alloc)...) 
+ + _, err := io.ReadFull(r, b[pos:]) + if err != nil { + return nil, err + } + + if len(b) == n { + break + } + pos = len(b) + } + + return b, nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_map.go b/vendor/github.com/vmihailenco/msgpack/decode_map.go new file mode 100644 index 000000000..b542a7541 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_map.go @@ -0,0 +1,338 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const mapElemsAllocLimit = 1e4 + +var mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) +var mapStringStringType = mapStringStringPtrType.Elem() + +var mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) +var mapStringInterfaceType = mapStringInterfacePtrType.Elem() + +var errInvalidCode = errors.New("invalid code") + +func decodeMapValue(d *Decoder, v reflect.Value) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + + typ := v.Type() + if size == -1 { + v.Set(reflect.Zero(typ)) + return nil + } + + if v.IsNil() { + v.Set(reflect.MakeMap(typ)) + } + if size == 0 { + return nil + } + + return decodeMapValueSize(d, v, size) +} + +func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { + typ := v.Type() + keyType := typ.Key() + valueType := typ.Elem() + + for i := 0; i < size; i++ { + mk := reflect.New(keyType).Elem() + if err := d.DecodeValue(mk); err != nil { + return err + } + + mv := reflect.New(valueType).Elem() + if err := d.DecodeValue(mv); err != nil { + return err + } + + v.SetMapIndex(mk, mv) + } + + return nil +} + +// DecodeMapLen decodes map length. Length is -1 when map is nil. +func (d *Decoder) DecodeMapLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + + if codes.IsExt(c) { + if err = d.skipExtHeader(c); err != nil { + return 0, err + } + + c, err = d.readCode() + if err != nil { + return 0, err + } + } + return d.mapLen(c) +} + +func (d *Decoder) mapLen(c codes.Code) (int, error) { + size, err := d._mapLen(c) + err = expandInvalidCodeMapLenError(c, err) + return size, err +} + +func (d *Decoder) _mapLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } + if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { + return int(c & codes.FixedMapMask), nil + } + if c == codes.Map16 { + size, err := d.uint16() + return int(size), err + } + if c == codes.Map32 { + size, err := d.uint32() + return int(size), err + } + return 0, errInvalidCode +} + +func expandInvalidCodeMapLenError(c codes.Code, err error) error { + if err == errInvalidCode { + return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) + } + return err +} + +func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { + mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) + return d.decodeMapStringStringPtr(mptr) +} + +func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { + size, err := d.DecodeMapLen() + if err != nil { + return err + } + if size == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]string, min(size, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.DecodeString() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { + ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) + return d.decodeMapStringInterfacePtr(ptr) +} + +func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + m := *ptr + if m == nil { + *ptr = make(map[string]interface{}, min(n, mapElemsAllocLimit)) + m = *ptr + } + + for i := 0; i < n; i++ { + mk, err := d.DecodeString() + if err != nil { + return err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + m[mk] = mv + } + + return nil +} + +func (d *Decoder) DecodeMap() (interface{}, error) { + if d.decodeMapFunc != nil { + return d.decodeMapFunc(d) + } + + size, err := d.DecodeMapLen() + if err != nil { + return nil, err + } + if size == -1 { + return nil, nil + } + if size == 0 { + return make(map[string]interface{}), nil + } + + code, err := d.PeekCode() + if err != nil { + return nil, err + } + + if codes.IsString(code) { + return d.decodeMapStringInterfaceSize(size) + } + + key, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + value, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + + keyType := reflect.TypeOf(key) + valueType := reflect.TypeOf(value) + mapType := reflect.MapOf(keyType, valueType) + mapValue := reflect.MakeMap(mapType) + + mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) + size-- + + err = decodeMapValueSize(d, mapValue, size) + if err != nil { + return nil, err + } + + return mapValue.Interface(), nil +} + +func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { + m := make(map[string]interface{}, min(size, mapElemsAllocLimit)) + for i := 0; i < size; i++ { + mk, err := d.DecodeString() + if err != nil { + return nil, err + } + mv, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + m[mk] = mv + } + return m, nil +} + +func (d *Decoder) skipMap(c codes.Code) error { + n, err := d.mapLen(c) + if err != nil { + return err + } + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + if err := d.Skip(); err != nil { + return err + } + } + return nil +} + +func decodeStructValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + var isArray bool + + n, err := d._mapLen(c) + if err != nil { + var err2 error + n, err2 = d.arrayLen(c) + if err2 != nil { + return expandInvalidCodeMapLenError(c, err) + } + isArray = true + } + if n == -1 { + if err = mustSet(v); err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil + } + + var fields *fields + if d.useJSONTag { + fields = jsonStructs.Fields(v.Type()) + } else { + fields = structs.Fields(v.Type()) + } + + if isArray { + for i, f := range fields.List { + if i >= n { + break + } + if err := f.DecodeValue(d, v); err != nil { + return err + } + } + // Skip extra values. 
+ for i := len(fields.List); i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil + } + + for i := 0; i < n; i++ { + name, err := d.DecodeString() + if err != nil { + return err + } + if f := fields.Table[name]; f != nil { + if err := f.DecodeValue(d, v); err != nil { + return err + } + } else { + if err := d.Skip(); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_number.go b/vendor/github.com/vmihailenco/msgpack/decode_number.go new file mode 100644 index 000000000..15019cc97 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_number.go @@ -0,0 +1,307 @@ +package msgpack + +import ( + "fmt" + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) skipN(n int) error { + _, err := d.readN(n) + return err +} + +func (d *Decoder) uint8() (uint8, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return uint8(c), nil +} + +func (d *Decoder) int8() (int8, error) { + n, err := d.uint8() + return int8(n), err +} + +func (d *Decoder) uint16() (uint16, error) { + b, err := d.readN(2) + if err != nil { + return 0, err + } + return (uint16(b[0]) << 8) | uint16(b[1]), nil +} + +func (d *Decoder) int16() (int16, error) { + n, err := d.uint16() + return int16(n), err +} + +func (d *Decoder) uint32() (uint32, error) { + b, err := d.readN(4) + if err != nil { + return 0, err + } + n := (uint32(b[0]) << 24) | + (uint32(b[1]) << 16) | + (uint32(b[2]) << 8) | + uint32(b[3]) + return n, nil +} + +func (d *Decoder) int32() (int32, error) { + n, err := d.uint32() + return int32(n), err +} + +func (d *Decoder) uint64() (uint64, error) { + b, err := d.readN(8) + if err != nil { + return 0, err + } + n := (uint64(b[0]) << 56) | + (uint64(b[1]) << 48) | + (uint64(b[2]) << 40) | + (uint64(b[3]) << 32) | + (uint64(b[4]) << 24) | + (uint64(b[5]) << 16) | + (uint64(b[6]) << 8) | + uint64(b[7]) + return n, nil +} + +func (d *Decoder) int64() (int64, error) { + n, err := d.uint64() + return int64(n), err +} + +// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go uint64. +func (d *Decoder) DecodeUint64() (uint64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.uint(c) +} + +func (d *Decoder) uint(c codes.Code) (uint64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return uint64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return uint64(n), err + case codes.Int8: + n, err := d.int8() + return uint64(n), err + case codes.Uint16: + n, err := d.uint16() + return uint64(n), err + case codes.Int16: + n, err := d.int16() + return uint64(n), err + case codes.Uint32: + n, err := d.uint32() + return uint64(n), err + case codes.Int32: + n, err := d.int32() + return uint64(n), err + case codes.Uint64, codes.Int64: + return d.uint64() + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) +} + +// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 +// into Go int64. 
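+// Values outside the int64 range wrap around, exactly as in a plain Go
+// conversion int64(n).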
+func (d *Decoder) DecodeInt64() (int64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.int(c) +} + +func (d *Decoder) int(c codes.Code) (int64, error) { + if c == codes.Nil { + return 0, nil + } + if codes.IsFixedNum(c) { + return int64(int8(c)), nil + } + switch c { + case codes.Uint8: + n, err := d.uint8() + return int64(n), err + case codes.Int8: + n, err := d.uint8() + return int64(int8(n)), err + case codes.Uint16: + n, err := d.uint16() + return int64(n), err + case codes.Int16: + n, err := d.uint16() + return int64(int16(n)), err + case codes.Uint32: + n, err := d.uint32() + return int64(n), err + case codes.Int32: + n, err := d.uint32() + return int64(int32(n)), err + case codes.Uint64, codes.Int64: + n, err := d.uint64() + return int64(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) +} + +func (d *Decoder) DecodeFloat32() (float32, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float32(c) +} + +func (d *Decoder) float32(c codes.Code) (float32, error) { + if c == codes.Float { + n, err := d.uint32() + if err != nil { + return 0, err + } + return math.Float32frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float32(n), nil +} + +// DecodeFloat64 decodes msgpack float32/64 into Go float64. +func (d *Decoder) DecodeFloat64() (float64, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.float64(c) +} + +func (d *Decoder) float64(c codes.Code) (float64, error) { + switch c { + case codes.Float: + n, err := d.float32(c) + if err != nil { + return 0, err + } + return float64(n), nil + case codes.Double: + n, err := d.uint64() + if err != nil { + return 0, err + } + return math.Float64frombits(n), nil + } + + n, err := d.int(c) + if err != nil { + return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) + } + return float64(n), nil +} + +func (d *Decoder) DecodeUint() (uint, error) { + n, err := d.DecodeUint64() + return uint(n), err +} + +func (d *Decoder) DecodeUint8() (uint8, error) { + n, err := d.DecodeUint64() + return uint8(n), err +} + +func (d *Decoder) DecodeUint16() (uint16, error) { + n, err := d.DecodeUint64() + return uint16(n), err +} + +func (d *Decoder) DecodeUint32() (uint32, error) { + n, err := d.DecodeUint64() + return uint32(n), err +} + +func (d *Decoder) DecodeInt() (int, error) { + n, err := d.DecodeInt64() + return int(n), err +} + +func (d *Decoder) DecodeInt8() (int8, error) { + n, err := d.DecodeInt64() + return int8(n), err +} + +func (d *Decoder) DecodeInt16() (int16, error) { + n, err := d.DecodeInt64() + return int16(n), err +} + +func (d *Decoder) DecodeInt32() (int32, error) { + n, err := d.DecodeInt64() + return int32(n), err +} + +func decodeFloat32Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat32() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(float64(f)) + return nil +} + +func decodeFloat64Value(d *Decoder, v reflect.Value) error { + f, err := d.DecodeFloat64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetFloat(f) + return nil +} + +func decodeInt64Value(d *Decoder, v reflect.Value) error { + n, err := d.DecodeInt64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetInt(n) + return nil +} + +func decodeUint64Value(d *Decoder, v 
reflect.Value) error { + n, err := d.DecodeUint64() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetUint(n) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_query.go b/vendor/github.com/vmihailenco/msgpack/decode_query.go new file mode 100644 index 000000000..d680be80c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_query.go @@ -0,0 +1,158 @@ +package msgpack + +import ( + "fmt" + "strconv" + "strings" + + "github.com/vmihailenco/msgpack/codes" +) + +type queryResult struct { + query string + key string + hasAsterisk bool + + values []interface{} +} + +func (q *queryResult) nextKey() { + ind := strings.IndexByte(q.query, '.') + if ind == -1 { + q.key = q.query + q.query = "" + return + } + q.key = q.query[:ind] + q.query = q.query[ind+1:] +} + +// Query extracts data specified by the query from the msgpack stream skipping +// any other data. Query consists of map keys and array indexes separated with dot, +// e.g. key1.0.key2. +func (d *Decoder) Query(query string) ([]interface{}, error) { + res := queryResult{ + query: query, + } + if err := d.query(&res); err != nil { + return nil, err + } + return res.values, nil +} + +func (d *Decoder) query(q *queryResult) error { + q.nextKey() + if q.key == "" { + v, err := d.decodeInterfaceCond() + if err != nil { + return err + } + q.values = append(q.values, v) + return nil + } + + code, err := d.PeekCode() + if err != nil { + return err + } + + switch { + case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): + err = d.queryMapKey(q) + case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): + err = d.queryArrayIndex(q) + default: + err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) + } + return err +} + +func (d *Decoder) queryMapKey(q *queryResult) error { + n, err := d.DecodeMapLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + for i := 0; i < n; i++ { + k, err := d.bytesNoCopy() + if err != nil { + return err + } + + if string(k) == q.key { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext((n - i - 1) * 2) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) queryArrayIndex(q *queryResult) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + if q.key == "*" { + q.hasAsterisk = true + + query := q.query + for i := 0; i < n; i++ { + q.query = query + if err := d.query(q); err != nil { + return err + } + } + + q.hasAsterisk = false + return nil + } + + ind, err := strconv.Atoi(q.key) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if i == ind { + if err := d.query(q); err != nil { + return err + } + if q.hasAsterisk { + return d.skipNext(n - i - 1) + } + return nil + } + + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) skipNext(n int) error { + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/decode_slice.go new file mode 100644 index 000000000..778d80b93 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_slice.go @@ -0,0 +1,193 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +const sliceElemsAllocLimit = 1e4 + +var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) + +// DecodeArrayLen decodes array length. Length is -1 when array is nil. +func (d *Decoder) DecodeArrayLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.arrayLen(c) +} + +func (d *Decoder) arrayLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { + return int(c & codes.FixedArrayMask), nil + } + switch c { + case codes.Array16: + n, err := d.uint16() + return int(n), err + case codes.Array32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) +} + +func decodeStringSliceValue(d *Decoder, v reflect.Value) error { + ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) + return d.decodeStringSlicePtr(ptr) +} + +func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + if n == -1 { + return nil + } + + ss := setStringsCap(*ptr, n) + for i := 0; i < n; i++ { + s, err := d.DecodeString() + if err != nil { + return err + } + ss = append(ss, s) + } + *ptr = ss + + return nil +} + +func setStringsCap(s []string, n int) []string { + if n > sliceElemsAllocLimit { + n = sliceElemsAllocLimit + } + + if s == nil { + return make([]string, 0, n) + } + + if cap(s) >= n { + return s[:0] + } + + s = s[:cap(s)] + s = append(s, make([]string, n-len(s))...) + return s[:0] +} + +func decodeSliceValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + v.Set(reflect.Zero(v.Type())) + return nil + } + if n == 0 && v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + return nil + } + + if v.Cap() >= n { + v.Set(v.Slice(0, n)) + } else if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Cap())) + } + + for i := 0; i < n; i++ { + if i >= v.Len() { + v.Set(growSliceValue(v, n)) + } + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func growSliceValue(v reflect.Value, n int) reflect.Value { + diff := n - v.Len() + if diff > sliceElemsAllocLimit { + diff = sliceElemsAllocLimit + } + v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) + return v +} + +func decodeArrayValue(d *Decoder, v reflect.Value) error { + n, err := d.DecodeArrayLen() + if err != nil { + return err + } + + if n == -1 { + return nil + } + + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + for i := 0; i < n; i++ { + sv := v.Index(i) + if err := d.DecodeValue(sv); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) DecodeSlice() ([]interface{}, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.decodeSlice(c) +} + +func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { + n, err := d.arrayLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + + s := make([]interface{}, 0, min(n, sliceElemsAllocLimit)) + for i := 0; i < n; i++ { + v, err := d.decodeInterfaceCond() + if err != nil { + return nil, err + } + s = append(s, v) + } + + return s, nil +} + +func (d *Decoder) skipSlice(c codes.Code) error { + n, err := d.arrayLen(c) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + if err := d.Skip(); err != nil { + return err + } + } + + return nil +} diff --git 
a/vendor/github.com/vmihailenco/msgpack/decode_string.go b/vendor/github.com/vmihailenco/msgpack/decode_string.go new file mode 100644 index 000000000..5402022ee --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/decode_string.go @@ -0,0 +1,175 @@ +package msgpack + +import ( + "fmt" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func (d *Decoder) bytesLen(c codes.Code) (int, error) { + if c == codes.Nil { + return -1, nil + } else if codes.IsFixedString(c) { + return int(c & codes.FixedStrMask), nil + } + switch c { + case codes.Str8, codes.Bin8: + n, err := d.uint8() + return int(n), err + case codes.Str16, codes.Bin16: + n, err := d.uint16() + return int(n), err + case codes.Str32, codes.Bin32: + n, err := d.uint32() + return int(n), err + } + return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) +} + +func (d *Decoder) DecodeString() (string, error) { + c, err := d.readCode() + if err != nil { + return "", err + } + return d.string(c) +} + +func (d *Decoder) string(c codes.Code) (string, error) { + n, err := d.bytesLen(c) + if err != nil { + return "", err + } + if n == -1 { + return "", nil + } + b, err := d.readN(n) + return string(b), err +} + +func decodeStringValue(d *Decoder, v reflect.Value) error { + s, err := d.DecodeString() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetString(s) + return nil +} + +func (d *Decoder) DecodeBytesLen() (int, error) { + c, err := d.readCode() + if err != nil { + return 0, err + } + return d.bytesLen(c) +} + +func (d *Decoder) DecodeBytes() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + return d.bytes(c, nil) +} + +func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return readN(d.r, b, n) +} + +func (d *Decoder) bytesNoCopy() ([]byte, error) { + c, err := d.readCode() + if err != nil { + return nil, err + } + n, err := d.bytesLen(c) + if err != nil { + return nil, err + } + if n == -1 { + return nil, nil + } + return d.readN(n) +} + +func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { + c, err := d.readCode() + if err != nil { + return err + } + return d.bytesPtr(c, ptr) +} + +func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + *ptr = nil + return nil + } + + *ptr, err = readN(d.r, *ptr, n) + return err +} + +func (d *Decoder) skipBytes(c codes.Code) error { + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + return d.skipN(n) +} + +func decodeBytesValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + b, err := d.bytes(c, v.Bytes()) + if err != nil { + return err + } + + if err = mustSet(v); err != nil { + return err + } + v.SetBytes(b) + + return nil +} + +func decodeByteArrayValue(d *Decoder, v reflect.Value) error { + c, err := d.readCode() + if err != nil { + return err + } + + n, err := d.bytesLen(c) + if err != nil { + return err + } + if n == -1 { + return nil + } + if n > v.Len() { + return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) + } + + b := v.Slice(0, n).Bytes() + return d.readFull(b) +} diff --git a/vendor/github.com/vmihailenco/msgpack/decode_value.go b/vendor/github.com/vmihailenco/msgpack/decode_value.go new file mode 100644 index 000000000..7b858b5fc --- /dev/null +++ 
b/vendor/github.com/vmihailenco/msgpack/decode_value.go @@ -0,0 +1,234 @@ +package msgpack + +import ( + "errors" + "fmt" + "reflect" +) + +var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() +var stringType = reflect.TypeOf((*string)(nil)).Elem() + +var valueDecoders []decoderFunc + +func init() { + valueDecoders = []decoderFunc{ + reflect.Bool: decodeBoolValue, + reflect.Int: decodeInt64Value, + reflect.Int8: decodeInt64Value, + reflect.Int16: decodeInt64Value, + reflect.Int32: decodeInt64Value, + reflect.Int64: decodeInt64Value, + reflect.Uint: decodeUint64Value, + reflect.Uint8: decodeUint64Value, + reflect.Uint16: decodeUint64Value, + reflect.Uint32: decodeUint64Value, + reflect.Uint64: decodeUint64Value, + reflect.Float32: decodeFloat32Value, + reflect.Float64: decodeFloat64Value, + reflect.Complex64: decodeUnsupportedValue, + reflect.Complex128: decodeUnsupportedValue, + reflect.Array: decodeArrayValue, + reflect.Chan: decodeUnsupportedValue, + reflect.Func: decodeUnsupportedValue, + reflect.Interface: decodeInterfaceValue, + reflect.Map: decodeMapValue, + reflect.Ptr: decodeUnsupportedValue, + reflect.Slice: decodeSliceValue, + reflect.String: decodeStringValue, + reflect.Struct: decodeStructValue, + reflect.UnsafePointer: decodeUnsupportedValue, + } +} + +func mustSet(v reflect.Value) error { + if !v.CanSet() { + return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) + } + return nil +} + +func getDecoder(typ reflect.Type) decoderFunc { + kind := typ.Kind() + + decoder, ok := typDecMap[typ] + if ok { + return decoder + } + + if typ.Implements(customDecoderType) { + return decodeCustomValue + } + if typ.Implements(unmarshalerType) { + return unmarshalValue + } + + // Addressable struct field value. + if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customDecoderType) { + return decodeCustomValueAddr + } + if ptr.Implements(unmarshalerType) { + return unmarshalValueAddr + } + } + + switch kind { + case reflect.Ptr: + return ptrDecoderFunc(typ) + case reflect.Slice: + elem := typ.Elem() + switch elem.Kind() { + case reflect.Uint8: + return decodeBytesValue + } + switch elem { + case stringType: + return decodeStringSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return decodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return decodeMapStringStringValue + case interfaceType: + return decodeMapStringInterfaceValue + } + } + } + return valueDecoders[kind] +} + +func ptrDecoderFunc(typ reflect.Type) decoderFunc { + decoder := getDecoder(typ.Elem()) + return func(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + if err := mustSet(v); err != nil { + return err + } + if !v.IsNil() { + v.Set(reflect.Zero(v.Type())) + } + return d.DecodeNil() + } + if v.IsNil() { + if err := mustSet(v); err != nil { + return err + } + v.Set(reflect.New(v.Type().Elem())) + } + return decoder(d, v.Elem()) + } +} + +func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return decodeCustomValue(d, v.Addr()) +} + +func decodeCustomValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + decoder := v.Interface().(CustomDecoder) + return decoder.DecodeMsgpack(d) +} + +func unmarshalValueAddr(d *Decoder, v reflect.Value) error { + if !v.CanAddr() { + 
return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + } + return unmarshalValue(d, v.Addr()) +} + +func unmarshalValue(d *Decoder, v reflect.Value) error { + if d.hasNilCode() { + return d.decodeNilValue(v) + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + + if d.extLen != 0 { + b, err := d.readN(d.extLen) + if err != nil { + return err + } + d.rec = b + } else { + d.rec = makeBuffer() + if err := d.Skip(); err != nil { + return err + } + } + + unmarshaler := v.Interface().(Unmarshaler) + err := unmarshaler.UnmarshalMsgpack(d.rec) + d.rec = nil + return err +} + +func decodeBoolValue(d *Decoder, v reflect.Value) error { + flag, err := d.DecodeBool() + if err != nil { + return err + } + if err = mustSet(v); err != nil { + return err + } + v.SetBool(flag) + return nil +} + +func decodeInterfaceValue(d *Decoder, v reflect.Value) error { + if v.IsNil() { + return d.interfaceValue(v) + } + + elem := v.Elem() + if !elem.CanAddr() { + if d.hasNilCode() { + v.Set(reflect.Zero(v.Type())) + return d.DecodeNil() + } + } + + return d.DecodeValue(elem) +} + +func (d *Decoder) interfaceValue(v reflect.Value) error { + vv, err := d.decodeInterfaceCond() + if err != nil { + return err + } + + if vv != nil { + if v.Type() == errorType { + if vv, ok := vv.(string); ok { + v.Set(reflect.ValueOf(errors.New(vv))) + return nil + } + } + + v.Set(reflect.ValueOf(vv)) + } + + return nil +} + +func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode.go b/vendor/github.com/vmihailenco/msgpack/encode.go new file mode 100644 index 000000000..c2bb23cde --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode.go @@ -0,0 +1,170 @@ +package msgpack + +import ( + "bytes" + "io" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +type writer interface { + io.Writer + WriteByte(byte) error + WriteString(string) (int, error) +} + +type byteWriter struct { + io.Writer + + buf []byte + bootstrap [64]byte +} + +func newByteWriter(w io.Writer) *byteWriter { + bw := &byteWriter{ + Writer: w, + } + bw.buf = bw.bootstrap[:] + return bw +} + +func (w *byteWriter) WriteByte(c byte) error { + w.buf = w.buf[:1] + w.buf[0] = c + _, err := w.Write(w.buf) + return err +} + +func (w *byteWriter) WriteString(s string) (int, error) { + w.buf = append(w.buf[:0], s...) + return w.Write(w.buf) +} + +// Marshal returns the MessagePack encoding of v. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + err := NewEncoder(&buf).Encode(v) + return buf.Bytes(), err +} + +type Encoder struct { + w writer + buf []byte + + sortMapKeys bool + structAsArray bool + useJSONTag bool + useCompact bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + bw, ok := w.(writer) + if !ok { + bw = newByteWriter(w) + } + return &Encoder{ + w: bw, + buf: make([]byte, 9), + } +} + +// SortMapKeys causes the Encoder to encode map keys in increasing order. +// Supported map types are: +// - map[string]string +// - map[string]interface{} +func (e *Encoder) SortMapKeys(flag bool) *Encoder { + e.sortMapKeys = flag + return e +} + +// StructAsArray causes the Encoder to encode Go structs as MessagePack arrays. +func (e *Encoder) StructAsArray(flag bool) *Encoder { + e.structAsArray = flag + return e +} + +// UseJSONTag causes the Encoder to use json struct tag as fallback option +// if there is no msgpack tag. 
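+// A hypothetical sketch (the type and field names are invented for
+// illustration):
+//
+//    type User struct {
+//        Name string `json:"name"`
+//    }
+//    var buf bytes.Buffer
+//    enc := msgpack.NewEncoder(&buf).UseJSONTag(true)
+//    _ = enc.Encode(User{Name: "jane"}) // encoded with the key "name"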
+func (e *Encoder) UseJSONTag(flag bool) *Encoder { + e.useJSONTag = flag + return e +} + +// UseCompactEncoding causes the Encoder to chose the most compact encoding. +// For example, it allows to encode Go int64 as msgpack int8 saving 7 bytes. +func (e *Encoder) UseCompactEncoding(flag bool) *Encoder { + e.useCompact = flag + return e +} + +func (e *Encoder) Encode(v interface{}) error { + switch v := v.(type) { + case nil: + return e.EncodeNil() + case string: + return e.EncodeString(v) + case []byte: + return e.EncodeBytes(v) + case int: + return e.encodeInt64Cond(int64(v)) + case int64: + return e.encodeInt64Cond(v) + case uint: + return e.encodeUint64Cond(uint64(v)) + case uint64: + return e.encodeUint64Cond(v) + case bool: + return e.EncodeBool(v) + case float32: + return e.EncodeFloat32(v) + case float64: + return e.EncodeFloat64(v) + case time.Duration: + return e.encodeInt64Cond(int64(v)) + case time.Time: + return e.EncodeTime(v) + } + return e.EncodeValue(reflect.ValueOf(v)) +} + +func (e *Encoder) EncodeMulti(v ...interface{}) error { + for _, vv := range v { + if err := e.Encode(vv); err != nil { + return err + } + } + return nil +} + +func (e *Encoder) EncodeValue(v reflect.Value) error { + fn := getEncoder(v.Type()) + return fn(e, v) +} + +func (e *Encoder) EncodeNil() error { + return e.writeCode(codes.Nil) +} + +func (e *Encoder) EncodeBool(value bool) error { + if value { + return e.writeCode(codes.True) + } + return e.writeCode(codes.False) +} + +func (e *Encoder) writeCode(c codes.Code) error { + return e.w.WriteByte(byte(c)) +} + +func (e *Encoder) write(b []byte) error { + _, err := e.w.Write(b) + return err +} + +func (e *Encoder) writeString(s string) error { + _, err := e.w.WriteString(s) + return err +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_map.go b/vendor/github.com/vmihailenco/msgpack/encode_map.go new file mode 100644 index 000000000..a87c4075f --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_map.go @@ -0,0 +1,172 @@ +package msgpack + +import ( + "reflect" + "sort" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeMapValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + for _, key := range v.MapKeys() { + if err := e.EncodeValue(key); err != nil { + return err + } + if err := e.EncodeValue(v.MapIndex(key)); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringStringType).Interface().(map[string]string) + if e.sortMapKeys { + return e.encodeSortedMapStringString(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeString(mv); err != nil { + return err + } + } + + return nil +} + +func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) + if e.sortMapKeys { + return e.encodeSortedMapStringInterface(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.Encode(mv); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringString(m 
map[string]string) error { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeString(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.Encode(m[k]); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) EncodeMapLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedMapLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Map16, uint16(l)) + } + return e.write4(codes.Map32, uint32(l)) +} + +func encodeStructValue(e *Encoder, strct reflect.Value) error { + var structFields *fields + if e.useJSONTag { + structFields = jsonStructs.Fields(strct.Type()) + } else { + structFields = structs.Fields(strct.Type()) + } + + if e.structAsArray || structFields.AsArray { + return encodeStructValueAsArray(e, strct, structFields.List) + } + fields := structFields.OmitEmpty(strct) + + if err := e.EncodeMapLen(len(fields)); err != nil { + return err + } + + for _, f := range fields { + if err := e.EncodeString(f.name); err != nil { + return err + } + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + + return nil +} + +func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { + if err := e.EncodeArrayLen(len(fields)); err != nil { + return err + } + for _, f := range fields { + if err := f.EncodeValue(e, strct); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_number.go b/vendor/github.com/vmihailenco/msgpack/encode_number.go new file mode 100644 index 000000000..dd7db6fdd --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_number.go @@ -0,0 +1,230 @@ +package msgpack + +import ( + "math" + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. +func (e *Encoder) EncodeUint8(n uint8) error { + return e.write1(codes.Uint8, n) +} + +func (e *Encoder) encodeUint8Cond(n uint8) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint8(n) +} + +// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeUint16(n uint16) error { + return e.write2(codes.Uint16, n) +} + +func (e *Encoder) encodeUint16Cond(n uint16) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint16(n) +} + +// EncodeUint32 encodes an uint16 in 5 bytes preserving type of the number. +func (e *Encoder) EncodeUint32(n uint32) error { + return e.write4(codes.Uint32, n) +} + +func (e *Encoder) encodeUint32Cond(n uint32) error { + if e.useCompact { + return e.EncodeUint(uint64(n)) + } + return e.EncodeUint32(n) +} + +// EncodeUint64 encodes an uint16 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeUint64(n uint64) error { + return e.write8(codes.Uint64, n) +} + +func (e *Encoder) encodeUint64Cond(n uint64) error { + if e.useCompact { + return e.EncodeUint(n) + } + return e.EncodeUint64(n) +} + +// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number. 
+func (e *Encoder) EncodeInt8(n int8) error { + return e.write1(codes.Int8, uint8(n)) +} + +func (e *Encoder) encodeInt8Cond(n int8) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt8(n) +} + +// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number. +func (e *Encoder) EncodeInt16(n int16) error { + return e.write2(codes.Int16, uint16(n)) +} + +func (e *Encoder) encodeInt16Cond(n int16) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt16(n) +} + +// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number. +func (e *Encoder) EncodeInt32(n int32) error { + return e.write4(codes.Int32, uint32(n)) +} + +func (e *Encoder) encodeInt32Cond(n int32) error { + if e.useCompact { + return e.EncodeInt(int64(n)) + } + return e.EncodeInt32(n) +} + +// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number. +func (e *Encoder) EncodeInt64(n int64) error { + return e.write8(codes.Int64, uint64(n)) +} + +func (e *Encoder) encodeInt64Cond(n int64) error { + if e.useCompact { + return e.EncodeInt(n) + } + return e.EncodeInt64(n) +} + +// EncodeUnsignedNumber encodes an uint64 in 1, 2, 3, 5, or 9 bytes. +// Type of the number is lost during encoding. +func (e *Encoder) EncodeUint(n uint64) error { + if n <= math.MaxInt8 { + return e.w.WriteByte(byte(n)) + } + if n <= math.MaxUint8 { + return e.EncodeUint8(uint8(n)) + } + if n <= math.MaxUint16 { + return e.EncodeUint16(uint16(n)) + } + if n <= math.MaxUint32 { + return e.EncodeUint32(uint32(n)) + } + return e.EncodeUint64(uint64(n)) +} + +// EncodeNumber encodes an int64 in 1, 2, 3, 5, or 9 bytes. +// Type of number is lost during encoding. +func (e *Encoder) EncodeInt(n int64) error { + if n >= 0 { + return e.EncodeUint(uint64(n)) + } + if n >= int64(int8(codes.NegFixedNumLow)) { + return e.w.WriteByte(byte(n)) + } + if n >= math.MinInt8 { + return e.EncodeInt8(int8(n)) + } + if n >= math.MinInt16 { + return e.EncodeInt16(int16(n)) + } + if n >= math.MinInt32 { + return e.EncodeInt32(int32(n)) + } + return e.EncodeInt64(int64(n)) +} + +func (e *Encoder) EncodeFloat32(n float32) error { + return e.write4(codes.Float, math.Float32bits(n)) +} + +func (e *Encoder) EncodeFloat64(n float64) error { + return e.write8(codes.Double, math.Float64bits(n)) +} + +func (e *Encoder) write1(code codes.Code, n uint8) error { + e.buf = e.buf[:2] + e.buf[0] = byte(code) + e.buf[1] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write2(code codes.Code, n uint16) error { + e.buf = e.buf[:3] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 8) + e.buf[2] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write4(code codes.Code, n uint32) error { + e.buf = e.buf[:5] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 24) + e.buf[2] = byte(n >> 16) + e.buf[3] = byte(n >> 8) + e.buf[4] = byte(n) + return e.write(e.buf) +} + +func (e *Encoder) write8(code codes.Code, n uint64) error { + e.buf = e.buf[:9] + e.buf[0] = byte(code) + e.buf[1] = byte(n >> 56) + e.buf[2] = byte(n >> 48) + e.buf[3] = byte(n >> 40) + e.buf[4] = byte(n >> 32) + e.buf[5] = byte(n >> 24) + e.buf[6] = byte(n >> 16) + e.buf[7] = byte(n >> 8) + e.buf[8] = byte(n) + return e.write(e.buf) +} + +func encodeUint8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint8Cond(uint8(v.Uint())) +} + +func encodeUint16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint16Cond(uint16(v.Uint())) +} + +func encodeUint32CondValue(e *Encoder, v reflect.Value) error { + return 
e.encodeUint32Cond(uint32(v.Uint())) +} + +func encodeUint64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeUint64Cond(v.Uint()) +} + +func encodeInt8CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt8Cond(int8(v.Int())) +} + +func encodeInt16CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt16Cond(int16(v.Int())) +} + +func encodeInt32CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt32Cond(int32(v.Int())) +} + +func encodeInt64CondValue(e *Encoder, v reflect.Value) error { + return e.encodeInt64Cond(v.Int()) +} + +func encodeFloat32Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat32(float32(v.Float())) +} + +func encodeFloat64Value(e *Encoder, v reflect.Value) error { + return e.EncodeFloat64(v.Float()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/encode_slice.go new file mode 100644 index 000000000..5ddbd6311 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_slice.go @@ -0,0 +1,124 @@ +package msgpack + +import ( + "reflect" + + "github.com/vmihailenco/msgpack/codes" +) + +func encodeStringValue(e *Encoder, v reflect.Value) error { + return e.EncodeString(v.String()) +} + +func encodeByteSliceValue(e *Encoder, v reflect.Value) error { + return e.EncodeBytes(v.Bytes()) +} + +func encodeByteArrayValue(e *Encoder, v reflect.Value) error { + if err := e.EncodeBytesLen(v.Len()); err != nil { + return err + } + + if v.CanAddr() { + b := v.Slice(0, v.Len()).Bytes() + return e.write(b) + } + + e.buf = grow(e.buf, v.Len()) + reflect.Copy(reflect.ValueOf(e.buf), v) + return e.write(e.buf) +} + +func grow(b []byte, n int) []byte { + if cap(b) >= n { + return b[:n] + } + b = b[:cap(b)] + b = append(b, make([]byte, n-len(b))...) 
+ return b +} + +func (e *Encoder) EncodeBytesLen(l int) error { + if l < 256 { + return e.write1(codes.Bin8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Bin16, uint16(l)) + } + return e.write4(codes.Bin32, uint32(l)) +} + +func (e *Encoder) encodeStrLen(l int) error { + if l < 32 { + return e.writeCode(codes.FixedStrLow | codes.Code(l)) + } + if l < 256 { + return e.write1(codes.Str8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Str16, uint16(l)) + } + return e.write4(codes.Str32, uint32(l)) +} + +func (e *Encoder) EncodeString(v string) error { + if err := e.encodeStrLen(len(v)); err != nil { + return err + } + return e.writeString(v) +} + +func (e *Encoder) EncodeBytes(v []byte) error { + if v == nil { + return e.EncodeNil() + } + if err := e.EncodeBytesLen(len(v)); err != nil { + return err + } + return e.write(v) +} + +func (e *Encoder) EncodeArrayLen(l int) error { + if l < 16 { + return e.writeCode(codes.FixedArrayLow | codes.Code(l)) + } + if l < 65536 { + return e.write2(codes.Array16, uint16(l)) + } + return e.write4(codes.Array32, uint32(l)) +} + +func (e *Encoder) encodeStringSlice(s []string) error { + if s == nil { + return e.EncodeNil() + } + if err := e.EncodeArrayLen(len(s)); err != nil { + return err + } + for _, v := range s { + if err := e.EncodeString(v); err != nil { + return err + } + } + return nil +} + +func encodeSliceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encodeArrayValue(e, v) +} + +func encodeArrayValue(e *Encoder, v reflect.Value) error { + l := v.Len() + if err := e.EncodeArrayLen(l); err != nil { + return err + } + for i := 0; i < l; i++ { + if err := e.EncodeValue(v.Index(i)); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/encode_value.go b/vendor/github.com/vmihailenco/msgpack/encode_value.go new file mode 100644 index 000000000..b46ab02a1 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/encode_value.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "fmt" + "reflect" +) + +var valueEncoders []encoderFunc + +func init() { + valueEncoders = []encoderFunc{ + reflect.Bool: encodeBoolValue, + reflect.Int: encodeInt64CondValue, + reflect.Int8: encodeInt8CondValue, + reflect.Int16: encodeInt16CondValue, + reflect.Int32: encodeInt32CondValue, + reflect.Int64: encodeInt64CondValue, + reflect.Uint: encodeUint64CondValue, + reflect.Uint8: encodeUint8CondValue, + reflect.Uint16: encodeUint16CondValue, + reflect.Uint32: encodeUint32CondValue, + reflect.Uint64: encodeUint64CondValue, + reflect.Float32: encodeFloat32Value, + reflect.Float64: encodeFloat64Value, + reflect.Complex64: encodeUnsupportedValue, + reflect.Complex128: encodeUnsupportedValue, + reflect.Array: encodeArrayValue, + reflect.Chan: encodeUnsupportedValue, + reflect.Func: encodeUnsupportedValue, + reflect.Interface: encodeInterfaceValue, + reflect.Map: encodeMapValue, + reflect.Ptr: encodeUnsupportedValue, + reflect.Slice: encodeSliceValue, + reflect.String: encodeStringValue, + reflect.Struct: encodeStructValue, + reflect.UnsafePointer: encodeUnsupportedValue, + } +} + +func getEncoder(typ reflect.Type) encoderFunc { + if encoder, ok := typEncMap[typ]; ok { + return encoder + } + + if typ.Implements(customEncoderType) { + return encodeCustomValue + } + if typ.Implements(marshalerType) { + return marshalValue + } + + kind := typ.Kind() + + // Addressable struct field value. 
+ if kind != reflect.Ptr { + ptr := reflect.PtrTo(typ) + if ptr.Implements(customEncoderType) { + return encodeCustomValuePtr + } + if ptr.Implements(marshalerType) { + return marshalValuePtr + } + } + + if typ == errorType { + return encodeErrorValue + } + + switch kind { + case reflect.Ptr: + return ptrEncoderFunc(typ) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteSliceValue + } + case reflect.Array: + if typ.Elem().Kind() == reflect.Uint8 { + return encodeByteArrayValue + } + case reflect.Map: + if typ.Key() == stringType { + switch typ.Elem() { + case stringType: + return encodeMapStringStringValue + case interfaceType: + return encodeMapStringInterfaceValue + } + } + } + return valueEncoders[kind] +} + +func ptrEncoderFunc(typ reflect.Type) encoderFunc { + encoder := getEncoder(typ.Elem()) + return func(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return encoder(e, v.Elem()) + } +} + +func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + encoder := v.Addr().Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func encodeCustomValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + encoder := v.Interface().(CustomEncoder) + return encoder.EncodeMsgpack(e) +} + +func marshalValuePtr(e *Encoder, v reflect.Value) error { + if !v.CanAddr() { + return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) + } + return marshalValue(e, v.Addr()) +} + +func marshalValue(e *Encoder, v reflect.Value) error { + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return e.EncodeNil() + } + } + + marshaler := v.Interface().(Marshaler) + b, err := marshaler.MarshalMsgpack() + if err != nil { + return err + } + _, err = e.w.Write(b) + return err +} + +func encodeBoolValue(e *Encoder, v reflect.Value) error { + return e.EncodeBool(v.Bool()) +} + +func encodeInterfaceValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeValue(v.Elem()) +} + +func encodeErrorValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + return e.EncodeString(v.Interface().(error).Error()) +} + +func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { + return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/vmihailenco/msgpack/ext.go b/vendor/github.com/vmihailenco/msgpack/ext.go new file mode 100644 index 000000000..7970be852 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/ext.go @@ -0,0 +1,238 @@ +package msgpack + +import ( + "bytes" + "fmt" + "reflect" + "sync" + + "github.com/vmihailenco/msgpack/codes" +) + +var extTypes = make(map[int8]reflect.Type) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// RegisterExt records a type, identified by a value for that type, +// under the provided id. That id will identify the concrete type of a value +// sent or received as an interface variable. Only types that will be +// transferred as implementations of interface values need to be registered. 
+// Expecting to be used only during initialization, it panics if the mapping +// between types and ids is not a bijection. +func RegisterExt(id int8, value interface{}) { + typ := reflect.TypeOf(value) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + ptr := reflect.PtrTo(typ) + + if _, ok := extTypes[id]; ok { + panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) + } + + registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) + registerExt(id, typ, getEncoder(typ), getDecoder(typ)) +} + +func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { + if dec != nil { + extTypes[id] = typ + } + if enc != nil { + typEncMap[typ] = makeExtEncoder(id, enc) + } + if dec != nil { + typDecMap[typ] = makeExtDecoder(id, dec) + } +} + +func (e *Encoder) EncodeExtHeader(typeId int8, length int) error { + if err := e.encodeExtLen(length); err != nil { + return err + } + if err := e.w.WriteByte(byte(typeId)); err != nil { + return err + } + return nil +} + +func makeExtEncoder(typeId int8, enc encoderFunc) encoderFunc { + return func(e *Encoder, v reflect.Value) error { + buf := bufferPool.Get().(*bytes.Buffer) + defer bufferPool.Put(buf) + buf.Reset() + + oldw := e.w + e.w = buf + err := enc(e, v) + e.w = oldw + + if err != nil { + return err + } + + err = e.EncodeExtHeader(typeId, buf.Len()) + if err != nil { + return err + } + return e.write(buf.Bytes()) + } +} + +func makeExtDecoder(typeId int8, dec decoderFunc) decoderFunc { + return func(d *Decoder, v reflect.Value) error { + c, err := d.PeekCode() + if err != nil { + return err + } + + if !codes.IsExt(c) { + return dec(d, v) + } + + id, extLen, err := d.DecodeExtHeader() + if err != nil { + return err + } + + if id != typeId { + return fmt.Errorf("msgpack: got ext type=%d, wanted %d", int8(c), typeId) + } + + d.extLen = extLen + return dec(d, v) + } +} + +func (e *Encoder) encodeExtLen(l int) error { + switch l { + case 1: + return e.writeCode(codes.FixExt1) + case 2: + return e.writeCode(codes.FixExt2) + case 4: + return e.writeCode(codes.FixExt4) + case 8: + return e.writeCode(codes.FixExt8) + case 16: + return e.writeCode(codes.FixExt16) + } + if l < 256 { + return e.write1(codes.Ext8, uint8(l)) + } + if l < 65536 { + return e.write2(codes.Ext16, uint16(l)) + } + return e.write4(codes.Ext32, uint32(l)) +} + +func (d *Decoder) parseExtLen(c codes.Code) (int, error) { + switch c { + case codes.FixExt1: + return 1, nil + case codes.FixExt2: + return 2, nil + case codes.FixExt4: + return 4, nil + case codes.FixExt8: + return 8, nil + case codes.FixExt16: + return 16, nil + case codes.Ext8: + n, err := d.uint8() + return int(n), err + case codes.Ext16: + n, err := d.uint16() + return int(n), err + case codes.Ext32: + n, err := d.uint32() + return int(n), err + default: + return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) + } +} + +func (d *Decoder) decodeExtHeader(c codes.Code) (int8, int, error) { + length, err := d.parseExtLen(c) + if err != nil { + return 0, 0, err + } + + typeId, err := d.readCode() + if err != nil { + return 0, 0, err + } + + return int8(typeId), length, nil +} + +func (d *Decoder) DecodeExtHeader() (typeId int8, length int, err error) { + c, err := d.readCode() + if err != nil { + return + } + return d.decodeExtHeader(c) +} + +func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { + extId, extLen, err := d.decodeExtHeader(c) + if err != nil { + return nil, err + } + + typ, ok := extTypes[extId] + if !ok { + return nil, fmt.Errorf("msgpack: 
unregistered ext id=%d", extId) + } + + v := reflect.New(typ) + + d.extLen = extLen + err = d.DecodeValue(v.Elem()) + d.extLen = 0 + if err != nil { + return nil, err + } + + return v.Interface(), nil +} + +func (d *Decoder) skipExt(c codes.Code) error { + n, err := d.parseExtLen(c) + if err != nil { + return err + } + return d.skipN(n + 1) +} + +func (d *Decoder) skipExtHeader(c codes.Code) error { + // Read ext type. + _, err := d.readCode() + if err != nil { + return err + } + // Read ext body len. + for i := 0; i < extHeaderLen(c); i++ { + _, err := d.readCode() + if err != nil { + return err + } + } + return nil +} + +func extHeaderLen(c codes.Code) int { + switch c { + case codes.Ext8: + return 1 + case codes.Ext16: + return 2 + case codes.Ext32: + return 4 + } + return 0 +} diff --git a/vendor/github.com/vmihailenco/msgpack/msgpack.go b/vendor/github.com/vmihailenco/msgpack/msgpack.go new file mode 100644 index 000000000..220b43c47 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/msgpack.go @@ -0,0 +1,17 @@ +package msgpack + +type Marshaler interface { + MarshalMsgpack() ([]byte, error) +} + +type Unmarshaler interface { + UnmarshalMsgpack([]byte) error +} + +type CustomEncoder interface { + EncodeMsgpack(*Encoder) error +} + +type CustomDecoder interface { + DecodeMsgpack(*Decoder) error +} diff --git a/vendor/github.com/vmihailenco/msgpack/tag.go b/vendor/github.com/vmihailenco/msgpack/tag.go new file mode 100644 index 000000000..48e6f942c --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/tag.go @@ -0,0 +1,42 @@ +package msgpack + +import ( + "strings" +) + +type tagOptions string + +func (o tagOptions) Get(name string) (string, bool) { + s := string(o) + for len(s) > 0 { + var next string + idx := strings.IndexByte(s, ',') + if idx >= 0 { + s, next = s[:idx], s[idx+1:] + } + if strings.HasPrefix(s, name) { + return s[len(name):], true + } + s = next + } + return "", false +} + +func (o tagOptions) Contains(name string) bool { + _, ok := o.Get(name) + return ok +} + +func parseTag(tag string) (string, tagOptions) { + if idx := strings.IndexByte(tag, ','); idx != -1 { + name := tag[:idx] + if strings.IndexByte(name, ':') == -1 { + return name, tagOptions(tag[idx+1:]) + } + } + + if strings.IndexByte(tag, ':') == -1 { + return tag, "" + } + return "", tagOptions(tag) +} diff --git a/vendor/github.com/vmihailenco/msgpack/time.go b/vendor/github.com/vmihailenco/msgpack/time.go new file mode 100644 index 000000000..f7409690a --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/time.go @@ -0,0 +1,145 @@ +package msgpack + +import ( + "encoding/binary" + "fmt" + "reflect" + "time" + + "github.com/vmihailenco/msgpack/codes" +) + +var timeExtId int8 = -1 + +func init() { + timeType := reflect.TypeOf((*time.Time)(nil)).Elem() + registerExt(timeExtId, timeType, encodeTimeValue, decodeTimeValue) +} + +func (e *Encoder) EncodeTime(tm time.Time) error { + b := e.encodeTime(tm) + if err := e.encodeExtLen(len(b)); err != nil { + return err + } + if err := e.w.WriteByte(byte(timeExtId)); err != nil { + return err + } + return e.write(b) +} + +func (e *Encoder) encodeTime(tm time.Time) []byte { + secs := uint64(tm.Unix()) + if secs>>34 == 0 { + data := uint64(tm.Nanosecond())<<34 | secs + if data&0xffffffff00000000 == 0 { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(data)) + return b + } else { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, data) + return b + } + } + + b := make([]byte, 12) + binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) + 
binary.BigEndian.PutUint64(b[4:], uint64(secs)) + return b +} + +func (d *Decoder) DecodeTime() (time.Time, error) { + tm, err := d.decodeTime() + if err != nil { + return tm, err + } + + if tm.IsZero() { + // Assume that zero time does not have timezone information. + return tm.UTC(), nil + } + return tm, nil +} + +func (d *Decoder) decodeTime() (time.Time, error) { + extLen := d.extLen + d.extLen = 0 + if extLen == 0 { + c, err := d.readCode() + if err != nil { + return time.Time{}, err + } + + // Legacy format. + if c == codes.FixedArrayLow|2 { + sec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + nsec, err := d.DecodeInt64() + if err != nil { + return time.Time{}, err + } + + return time.Unix(sec, nsec), nil + } + + if codes.IsString(c) { + s, err := d.string(c) + if err != nil { + return time.Time{}, err + } + return time.Parse(time.RFC3339Nano, s) + } + + extLen, err = d.parseExtLen(c) + if err != nil { + return time.Time{}, err + } + + // Skip ext id. + _, err = d.s.ReadByte() + if err != nil { + return time.Time{}, nil + } + } + + b, err := d.readN(extLen) + if err != nil { + return time.Time{}, err + } + + switch len(b) { + case 4: + sec := binary.BigEndian.Uint32(b) + return time.Unix(int64(sec), 0), nil + case 8: + sec := binary.BigEndian.Uint64(b) + nsec := int64(sec >> 34) + sec &= 0x00000003ffffffff + return time.Unix(int64(sec), nsec), nil + case 12: + nsec := binary.BigEndian.Uint32(b) + sec := binary.BigEndian.Uint64(b[4:]) + return time.Unix(int64(sec), int64(nsec)), nil + default: + err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) + return time.Time{}, err + } +} + +func encodeTimeValue(e *Encoder, v reflect.Value) error { + tm := v.Interface().(time.Time) + b := e.encodeTime(tm) + return e.write(b) +} + +func decodeTimeValue(d *Decoder, v reflect.Value) error { + tm, err := d.DecodeTime() + if err != nil { + return err + } + v.Set(reflect.ValueOf(tm)) + return nil +} diff --git a/vendor/github.com/vmihailenco/msgpack/types.go b/vendor/github.com/vmihailenco/msgpack/types.go new file mode 100644 index 000000000..6a1bf7f91 --- /dev/null +++ b/vendor/github.com/vmihailenco/msgpack/types.go @@ -0,0 +1,310 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +var customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() +var customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() + +var marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + +type encoderFunc func(*Encoder, reflect.Value) error +type decoderFunc func(*Decoder, reflect.Value) error + +var typEncMap = make(map[reflect.Type]encoderFunc) +var typDecMap = make(map[reflect.Type]decoderFunc) + +// Register registers encoder and decoder functions for a value. +// This is low level API and in most cases you should prefer implementing +// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces. 
+func Register(value interface{}, enc encoderFunc, dec decoderFunc) { + typ := reflect.TypeOf(value) + if enc != nil { + typEncMap[typ] = enc + } + if dec != nil { + typDecMap[typ] = dec + } +} + +//------------------------------------------------------------------------------ + +var structs = newStructCache(false) +var jsonStructs = newStructCache(true) + +type structCache struct { + mu sync.RWMutex + m map[reflect.Type]*fields + + useJSONTag bool +} + +func newStructCache(useJSONTag bool) *structCache { + return &structCache{ + m: make(map[reflect.Type]*fields), + + useJSONTag: useJSONTag, + } +} + +func (m *structCache) Fields(typ reflect.Type) *fields { + m.mu.RLock() + fs, ok := m.m[typ] + m.mu.RUnlock() + if ok { + return fs + } + + m.mu.Lock() + fs, ok = m.m[typ] + if !ok { + fs = getFields(typ, m.useJSONTag) + m.m[typ] = fs + } + m.mu.Unlock() + + return fs +} + +//------------------------------------------------------------------------------ + +type field struct { + name string + index []int + omitEmpty bool + encoder encoderFunc + decoder decoderFunc +} + +func (f *field) value(v reflect.Value) reflect.Value { + return fieldByIndex(v, f.index) +} + +func (f *field) Omit(strct reflect.Value) bool { + return f.omitEmpty && isEmptyValue(f.value(strct)) +} + +func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { + return f.encoder(e, f.value(strct)) +} + +func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { + return f.decoder(d, f.value(strct)) +} + +//------------------------------------------------------------------------------ + +type fields struct { + Table map[string]*field + List []*field + AsArray bool + + hasOmitEmpty bool +} + +func newFields(numField int) *fields { + return &fields{ + Table: make(map[string]*field, numField), + List: make([]*field, 0, numField), + } +} + +func (fs *fields) Add(field *field) { + fs.Table[field.name] = field + fs.List = append(fs.List, field) + if field.omitEmpty { + fs.hasOmitEmpty = true + } +} + +func (fs *fields) OmitEmpty(strct reflect.Value) []*field { + if !fs.hasOmitEmpty { + return fs.List + } + + fields := make([]*field, 0, len(fs.List)) + for _, f := range fs.List { + if !f.Omit(strct) { + fields = append(fields, f) + } + } + return fields +} + +func getFields(typ reflect.Type, useJSONTag bool) *fields { + numField := typ.NumField() + fs := newFields(numField) + + var omitEmpty bool + for i := 0; i < numField; i++ { + f := typ.Field(i) + + tag := f.Tag.Get("msgpack") + if useJSONTag && tag == "" { + tag = f.Tag.Get("json") + } + + name, opt := parseTag(tag) + if name == "-" { + continue + } + + if f.Name == "_msgpack" { + if opt.Contains("asArray") { + fs.AsArray = true + } + if opt.Contains("omitempty") { + omitEmpty = true + } + } + + if f.PkgPath != "" && !f.Anonymous { + continue + } + + field := &field{ + name: name, + index: f.Index, + omitEmpty: omitEmpty || opt.Contains("omitempty"), + encoder: getEncoder(f.Type), + decoder: getDecoder(f.Type), + } + + if field.name == "" { + field.name = f.Name + } + + if f.Anonymous && !opt.Contains("noinline") { + inline := opt.Contains("inline") + if inline { + inlineFields(fs, f.Type, field, useJSONTag) + } else { + inline = autoinlineFields(fs, f.Type, field, useJSONTag) + } + if inline { + fs.Table[field.name] = field + continue + } + } + + fs.Add(field) + } + return fs +} + +var encodeStructValuePtr uintptr +var decodeStructValuePtr uintptr + +func init() { + encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() + decodeStructValuePtr = 
reflect.ValueOf(decodeStructValue).Pointer() +} + +func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't inline shadowed fields. + continue + } + field.index = append(f.index, field.index...) + fs.Add(field) + } +} + +func autoinlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { + var encoder encoderFunc + var decoder decoderFunc + + if typ.Kind() == reflect.Struct { + encoder = f.encoder + decoder = f.decoder + } else { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + encoder = getEncoder(typ) + decoder = getDecoder(typ) + } + if typ.Kind() != reflect.Struct { + return false + } + } + + if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { + return false + } + if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { + return false + } + + inlinedFields := getFields(typ, useJSONTag).List + for _, field := range inlinedFields { + if _, ok := fs.Table[field.name]; ok { + // Don't auto inline if there are shadowed fields. + return false + } + } + + for _, field := range inlinedFields { + field.index = append(f.index, field.index...) + fs.Add(field) + } + return true +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + if len(index) == 1 { + return v.Field(index[0]) + } + for i, x := range index { + if i > 0 { + var ok bool + v, ok = indirectNew(v) + if !ok { + return v + } + } + v = v.Field(x) + } + return v +} + +func indirectNew(v reflect.Value) (reflect.Value, bool) { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + if !v.CanSet() { + return v, false + } + elemType := v.Type().Elem() + if elemType.Kind() != reflect.Struct { + return v, false + } + v.Set(reflect.New(elemType)) + } + v = v.Elem() + } + return v, true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml new file mode 100644 index 000000000..13ff99866 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.12 + diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md new file mode 100644 index 000000000..b3bc3b618 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md @@ -0,0 +1,10 @@ +# 1.0.1 (July 30, 2019) + +* The YAML decoder is now correctly treating quoted scalars as verbatim literal + strings rather than using the fuzzy type selection rules for them. Fuzzy + type selection rules still apply to unquoted scalars. + ([#4](https://github.com/zclconf/go-cty-yaml/pull/4)) + +# 1.0.0 (May 26, 2019) + +Initial release. 
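The 1.0.1 changelog entry above says quoted scalars now decode as verbatim strings while unquoted scalars still go through fuzzy type selection. The public entry points for that behaviour are not part of the files shown in this diff, so the following is only a hedged sketch of what the distinction looks like, assuming the upstream `yaml.Standard` converter and its `ImpliedType` method exist as in the released package:

```go
package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	// Quoted scalar: per the 1.0.1 changelog entry, this should stay a string.
	quoted := []byte(`port: "8080"`)
	// Unquoted scalar: fuzzy type selection still applies, so this should
	// come out as a number.
	unquoted := []byte(`port: 8080`)

	for _, src := range [][]byte{quoted, unquoted} {
		// yaml.Standard and ImpliedType are assumed from the upstream
		// package; they do not appear in the vendored files in this diff.
		ty, err := yaml.Standard.ImpliedType(src)
		if err != nil {
			panic(err)
		}
		// Expected (roughly): "string" for the quoted form, "number" for
		// the unquoted form.
		fmt.Println(ty.AttributeType("port").FriendlyName())
	}
}
```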
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE new file mode 100644 index 000000000..4e6c00ab3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/NOTICE @@ -0,0 +1,20 @@ +This package is derived from gopkg.in/yaml.v2, which is copyright +2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Includes mechanical ports of code from libyaml, distributed under its original +license. See LICENSE.libyaml for more information. + +Modifications for cty interfacing copyright 2019 Martin Atkins, and +distributed under the same license terms. 
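Before the ported libyaml internals (apic.go and friends) continue below, here is a short usage sketch for the msgpack package vendored earlier in this diff. It sticks to identifiers that actually appear in those files (Marshal, NewEncoder, SortMapKeys, StructAsArray, UseCompactEncoding, and the `msgpack` struct tag handling in encode_map.go and types.go); the Event type, its fields, and the sample values are made up for illustration, and this is a sketch, not part of the vendored sources:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack"
)

// Event is a hypothetical payload type; the struct tags drive the encoded
// field names and omitempty behaviour implemented in types.go above.
type Event struct {
	Name  string            `msgpack:"name"`
	Count int64             `msgpack:"count,omitempty"`
	Tags  map[string]string `msgpack:"tags,omitempty"`
}

func main() {
	ev := Event{Name: "deploy", Tags: map[string]string{"env": "prod"}}

	// One-shot encoding via Marshal (encode.go).
	b, err := msgpack.Marshal(ev)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshalled %d bytes\n", len(b))

	// Streaming encoding using the options exposed on Encoder: sorted map
	// keys, structs encoded as arrays, and compact integer encoding.
	var buf bytes.Buffer
	enc := msgpack.NewEncoder(&buf).
		SortMapKeys(true).
		StructAsArray(true).
		UseCompactEncoding(true)
	if err := enc.Encode(ev); err != nil {
		panic(err)
	}
	fmt.Printf("array-encoded %d bytes\n", buf.Len())
}
```

Custom types can additionally hook in through the Marshaler/Unmarshaler and CustomEncoder/CustomDecoder interfaces defined in msgpack.go, or be registered as MessagePack extension types with RegisterExt from ext.go.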
diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go new file mode 100644 index 000000000..1f7e87e67 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. 
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go new file mode 100644 index 000000000..a73b34a8b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/converter.go @@ -0,0 +1,69 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ConverterConfig is used to configure a new converter, using NewConverter. +type ConverterConfig struct { + // EncodeAsFlow, when set to true, causes Marshal to produce flow-style + // mapping and sequence serializations. + EncodeAsFlow bool +} + +// A Converter can marshal and unmarshal between cty values and YAML bytes. +// +// Because there are many different ways to map cty to YAML and vice-versa, +// a converter is configurable using the settings in ConverterConfig, which +// allow for a few different permutations of mapping to YAML. +// +// If you are just trying to work with generic, standard YAML, the predefined +// converter in Standard should be good enough. +type Converter struct { + encodeAsFlow bool +} + +// NewConverter creates a new Converter with the given configuration. +func NewConverter(config *ConverterConfig) *Converter { + return &Converter{ + encodeAsFlow: config.EncodeAsFlow, + } +} + +// Standard is a predefined Converter that produces and consumes generic YAML +// using only built-in constructs that any other YAML implementation ought to +// understand. +var Standard *Converter = NewConverter(&ConverterConfig{}) + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +func (c *Converter) ImpliedType(src []byte) (cty.Type, error) { + return c.impliedType(src) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func (c *Converter) Marshal(v cty.Value) ([]byte, error) { + return c.marshal(v) +} + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// An error is returned if the given source contains any YAML document +// delimiters. 
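+//
+// An illustrative sketch only (an added annotation, not upstream
+// documentation), assuming the Standard converter defined above: the usual
+// call pattern is to derive a type with ImpliedType and then decode the same
+// source against it, which is also what YAMLDecodeFunc does:
+//
+//	ty, err := Standard.ImpliedType(src)
+//	if err != nil {
+//		return cty.NilVal, err
+//	}
+//	return Standard.Unmarshal(src, ty)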
+func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return c.unmarshal(src, ty) +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go new file mode 100644 index 000000000..b91141cca --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go @@ -0,0 +1,57 @@ +package yaml + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code +// into a cty Value, using the ImpliedType and Unmarshal methods of the +// Standard pre-defined converter. +var YAMLDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "src", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + return cty.DynamicPseudoType, nil + } + if args[0].IsNull() { + return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null") + } + return Standard.ImpliedType([]byte(args[0].AsString())) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + return Standard.Unmarshal([]byte(args[0].AsString()), retType) + }, +}) + +// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value +// into YAML. +var YAMLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if !args[0].IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + raw, err := Standard.Marshal(args[0]) + if err != nil { + return cty.NilVal, err + } + return cty.StringVal(string(raw)), nil + }, +}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go new file mode 100644 index 000000000..e369ff27c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/decode.go @@ -0,0 +1,261 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilVal, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &valueAnalysis{ + anchorsPending: map[string]int{}, + anchorVals: map[string]cty.Value{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "missing start of document") + } + + v, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed") + } + if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if 
!yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value") + } + + return convert.Convert(v, ty) +} + +func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilVal, parserError(p) + } + return c.unmarshalParseRemainder(an, &evt, p) +} + +func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.unmarshalScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.unmarshalAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.unmarshalMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.unmarshalSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilVal, parseEventErrorWrap(evt, err) + } + + if val.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + val = cty.StringVal("<<") + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, val) + } + return val, nil +} + +func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + vals := make(map[string]cty.Value) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + v := cty.ObjectVal(vals) + if anchor != "" { + an.completeAnchor(anchor, v) + } + return v, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilVal, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. 
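+			// The value that follows the "<<" key is decoded recursively and
+			// its attributes are copied into vals; any result that is not an
+			// object or map is rejected with an error below.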
+ val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + ty := val.Type() + if !(ty.IsObjectType() || ty.IsMapType()) { + return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + vals[k.AsString()] = v + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known") + } + val, err := c.unmarshalParse(an, p) + if err != nil { + return cty.NilVal, err + } + + vals[keyVal.AsString()] = val + } +} + +func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var vals []cty.Value + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilVal, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.TupleVal(vals) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + val, err := c.unmarshalParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, val) + } +} + +func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) { + v, err := an.anchorVal(string(evt.anchor)) + if err != nil { + err = parseEventErrorWrap(evt, err) + } + return v, err +} + +type valueAnalysis struct { + anchorsPending map[string]int + anchorVals map[string]cty.Value +} + +func (an *valueAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *valueAnalysis) completeAnchor(name string, v cty.Value) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorVals[name] = v +} + +func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorVals[name] + if !ok { + return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go new file mode 100644 index 000000000..a1c2cc526 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. 
+func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go new file mode 100644 index 000000000..daa1478a9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/encode.go @@ -0,0 +1,189 @@ +package yaml + +import ( + "bytes" + "fmt" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +func (c *Converter) marshal(v cty.Value) ([]byte, error) { + var buf bytes.Buffer + + e := &yaml_emitter_t{} + yaml_emitter_initialize(e) + yaml_emitter_set_output_writer(e, &buf) + yaml_emitter_set_unicode(e, true) + + var evt yaml_event_t + yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_document_start_event_initialize(&evt, nil, nil, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + if err := c.marshalEmit(v, e); err != nil { + return nil, err + } + + yaml_document_end_event_initialize(&evt, true) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + yaml_stream_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return nil, emitterError(e) + } + + return buf.Bytes(), nil +} + +func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error { + ty := v.Type() + switch { + case v.IsNull(): + return c.marshalPrimitive(v, e) + case !v.IsKnown(): + return fmt.Errorf("cannot serialize unknown value as YAML") + case ty.IsPrimitiveType(): + return c.marshalPrimitive(v, e) + case ty.IsTupleType(), ty.IsListType(), ty.IsSetType(): + return c.marshalSequence(v, e) + case ty.IsObjectType(), ty.IsMapType(): + return c.marshalMapping(v, e) + default: + return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName()) + } +} + +func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error { + var evt yaml_event_t + + if v.IsNull() { + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte("null"), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil + } + + switch v.Type() { + case cty.String: + str := v.AsString() + style := yaml_DOUBLE_QUOTED_SCALAR_STYLE + if strings.Contains(str, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + style, + ) + case cty.Number: + str := v.AsBigFloat().Text('f', -1) + yaml_scalar_event_initialize( + 
&evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + case cty.Bool: + var str string + switch v { + case cty.True: + str = "true" + case cty.False: + str = "false" + } + yaml_scalar_event_initialize( + &evt, + nil, + nil, + []byte(str), + true, + true, + yaml_PLAIN_SCALAR_STYLE, + ) + } + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_SEQUENCE_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_SEQUENCE_STYLE + } + + var evt yaml_event_t + yaml_sequence_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + _, v := it.Element() + err := c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_sequence_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} + +func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error { + style := yaml_BLOCK_MAPPING_STYLE + if c.encodeAsFlow { + style = yaml_FLOW_MAPPING_STYLE + } + + var evt yaml_event_t + yaml_mapping_start_event_initialize(&evt, nil, nil, true, style) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + + for it := v.ElementIterator(); it.Next(); { + k, v := it.Element() + err := c.marshalEmit(k, e) + if err != nil { + return err + } + err = c.marshalEmit(v, e) + if err != nil { + return err + } + } + + yaml_mapping_end_event_initialize(&evt) + if !yaml_emitter_emit(e, &evt) { + return emitterError(e) + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go new file mode 100644 index 000000000..ae41c488f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/error.go @@ -0,0 +1,97 @@ +package yaml + +import ( + "errors" + "fmt" +) + +// Error is an error implementation used to report errors that correspond to +// a particular position in an input buffer. +type Error struct { + cause error + Line, Column int +} + +func (e Error) Error() string { + return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error()) +} + +// Cause is an implementation of the interface used by +// github.com/pkg/errors.Cause, returning the underlying error without the +// position information. +func (e Error) Cause() error { + return e.cause +} + +// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper +// returning the underlying error without the position information. 
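+//
+// Implementing the errwrap.Wrapper interface lets helpers from
+// github.com/hashicorp/errwrap reach the underlying cause while the rendered
+// message keeps its "on line L, column C" prefix. A caller can also recover
+// the position directly, for example (illustrative only, not part of this
+// package's tests):
+//
+//	if posErr, ok := err.(Error); ok {
+//		log.Printf("YAML error at %d:%d: %s", posErr.Line, posErr.Column, posErr.Cause())
+//	}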
+func (e Error) WrappedErrors() []error { + return []error{e.cause} +} + +func parserError(p *yaml_parser_t) error { + var cause error + if len(p.problem) > 0 { + cause = errors.New(p.problem) + } else { + cause = errors.New("invalid YAML syntax") // useless generic error, then + } + + return parserErrorWrap(p, cause) +} + +func parserErrorWrap(p *yaml_parser_t, cause error) error { + switch { + case p.problem_mark.line != 0: + line := p.problem_mark.line + column := p.problem_mark.column + // Scanner errors don't iterate line before returning error + if p.error == yaml_SCANNER_ERROR { + line++ + column = 0 + } + return Error{ + cause: cause, + Line: line, + Column: column + 1, + } + case p.context_mark.line != 0: + return Error{ + cause: cause, + Line: p.context_mark.line, + Column: p.context_mark.column + 1, + } + default: + return cause + } +} + +func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error { + return parserErrorWrap(p, fmt.Errorf(f, vals...)) +} + +func parseEventErrorWrap(evt *yaml_event_t, cause error) error { + if evt.start_mark.line == 0 { + // Event does not have a start mark, so we won't wrap the error at all + return cause + } + return Error{ + cause: cause, + Line: evt.start_mark.line, + Column: evt.start_mark.column + 1, + } +} + +func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error { + return parseEventErrorWrap(evt, fmt.Errorf(f, vals...)) +} + +func emitterError(e *yaml_emitter_t) error { + var cause error + if len(e.problem) > 0 { + cause = errors.New(e.problem) + } else { + cause = errors.New("failed to write YAML token") // useless generic error, then + } + return cause +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod new file mode 100644 index 000000000..3d5226871 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/go.mod @@ -0,0 +1,3 @@ +module github.com/zclconf/go-cty-yaml + +require github.com/zclconf/go-cty v1.0.0 diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum new file mode 100644 index 000000000..841f7fcfe --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/go.sum @@ -0,0 +1,18 @@ +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go new file mode 100644 index 000000000..5b7b0686f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go @@ -0,0 +1,268 @@ +package yaml + +import ( + "errors" + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (c *Converter) impliedType(src []byte) (cty.Type, error) { + p := &yaml_parser_t{} + if !yaml_parser_initialize(p) { + return cty.NilType, errors.New("failed to initialize YAML parser") + } + if len(src) == 0 { + src = []byte{'\n'} + } + + an := &typeAnalysis{ + anchorsPending: map[string]int{}, + anchorTypes: map[string]cty.Type{}, + } + + yaml_parser_set_input_string(p, src) + + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing stream start token") + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "missing start of document") + } + + ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ == yaml_DOCUMENT_START_EVENT { + return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed") + } + if evt.typ != yaml_DOCUMENT_END_EVENT { + return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String()) + } + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + if evt.typ != yaml_STREAM_END_EVENT { + return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value") + } + + return ty, err +} + +func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) { + var evt yaml_event_t + if !yaml_parser_parse(p, &evt) { + return cty.NilType, parserError(p) + } + return c.impliedTypeParseRemainder(an, &evt, p) +} + +func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + switch evt.typ { + case yaml_SCALAR_EVENT: + return c.impliedTypeScalar(an, evt, p) + case yaml_ALIAS_EVENT: + return c.impliedTypeAlias(an, evt, p) + case yaml_MAPPING_START_EVENT: + return c.impliedTypeMapping(an, evt, p) + case yaml_SEQUENCE_START_EVENT: + return c.impliedTypeSequence(an, evt, p) + case yaml_DOCUMENT_START_EVENT: + return cty.NilType, parseEventErrorf(evt, "only a single document is allowed") + case yaml_STREAM_END_EVENT: + // Decoding an empty buffer, probably + return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream") + default: + // Should never happen; the above should be comprehensive + return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String()) + } +} + +func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + src := evt.value + tag := string(evt.tag) + anchor := string(evt.anchor) + implicit := 
evt.implicit + + if len(anchor) > 0 { + an.beginAnchor(anchor) + } + + var ty cty.Type + switch { + case tag == "" && !implicit: + // Untagged explicit string + ty = cty.String + default: + v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style)) + if err != nil { + return cty.NilType, parseEventErrorWrap(evt, err) + } + if v.RawEquals(mergeMappingVal) { + // In any context other than a mapping key, this is just a plain string + ty = cty.String + } else { + ty = v.Type() + } + } + + if len(anchor) > 0 { + an.completeAnchor(anchor, ty) + } + return ty, nil +} + +func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_MAP_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + atys := make(map[string]cty.Type) + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_MAPPING_END_EVENT { + ty := cty.Object(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + if nextEvt.typ != yaml_SCALAR_EVENT { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style)) + if err != nil { + return cty.NilType, err + } + if keyVal.RawEquals(mergeMappingVal) { + // Merging the value (which must be a mapping) into our mapping, + // then. + ty, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + if !ty.IsObjectType() { + return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName()) + } + for name, aty := range ty.AttributeTypes() { + atys[name] = aty + } + continue + } + if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil { + keyVal = keyValStr + } else { + return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys") + } + if keyVal.IsNull() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null") + } + if !keyVal.IsKnown() { + return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known") + } + valTy, err := c.impliedTypeParse(an, p) + if err != nil { + return cty.NilType, err + } + + atys[keyVal.AsString()] = valTy + } +} + +func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + tag := string(evt.tag) + anchor := string(evt.anchor) + + if tag != "" && tag != yaml_SEQ_TAG { + return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag) + } + + if anchor != "" { + an.beginAnchor(anchor) + } + + var atys []cty.Type + for { + var nextEvt yaml_event_t + if !yaml_parser_parse(p, &nextEvt) { + return cty.NilType, parserError(p) + } + if nextEvt.typ == yaml_SEQUENCE_END_EVENT { + ty := cty.Tuple(atys) + if anchor != "" { + an.completeAnchor(anchor, ty) + } + return ty, nil + } + + valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p) + if err != nil { + return cty.NilType, err + } + + atys = append(atys, valTy) + } +} + +func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) { + ty, err := an.anchorType(string(evt.anchor)) + if err != nil { + err = parseEventErrorWrap(evt, err) + } + return ty, err +} + +type 
typeAnalysis struct { + anchorsPending map[string]int + anchorTypes map[string]cty.Type +} + +func (an *typeAnalysis) beginAnchor(name string) { + an.anchorsPending[name]++ +} + +func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) { + an.anchorsPending[name]-- + if an.anchorsPending[name] == 0 { + delete(an.anchorsPending, name) + } + an.anchorTypes[name] = ty +} + +func (an *typeAnalysis) anchorType(name string) (cty.Type, error) { + if _, pending := an.anchorsPending[name]; pending { + // YAML normally allows self-referencing structures, but cty cannot + // represent them (it requires all structures to be finite) so we + // must fail here. + return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name) + } + ty, ok := an.anchorTypes[name] + if !ok { + return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name) + } + return ty, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go new file mode 100644 index 000000000..81d05dfe5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. 
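+	// In that case the zero-value event assigned above is returned unchanged
+	// and the call still reports success.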
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case 
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+        return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+    default:
+        panic("invalid parser state")
+    }
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+    token := peek_token(parser)
+    if token == nil {
+        return false
+    }
+    if token.typ != yaml_STREAM_START_TOKEN {
+        return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+    }
+    parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+    *event = yaml_event_t{
+        typ:        yaml_STREAM_START_EVENT,
+        start_mark: token.start_mark,
+        end_mark:   token.end_mark,
+        encoding:   token.encoding,
+    }
+    skip_token(parser)
+    return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+    token := peek_token(parser)
+    if token == nil {
+        return false
+    }
+
+    // Parse extra document end indicators.
+    if !implicit {
+        for token.typ == yaml_DOCUMENT_END_TOKEN {
+            skip_token(parser)
+            token = peek_token(parser)
+            if token == nil {
+                return false
+            }
+        }
+    }
+
+    if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+        token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+        token.typ != yaml_DOCUMENT_START_TOKEN &&
+        token.typ != yaml_STREAM_END_TOKEN {
+        // Parse an implicit document.
+        if !yaml_parser_process_directives(parser, nil, nil) {
+            return false
+        }
+        parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+        parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+        *event = yaml_event_t{
+            typ:        yaml_DOCUMENT_START_EVENT,
+            start_mark: token.start_mark,
+            end_mark:   token.end_mark,
+        }
+
+    } else if token.typ != yaml_STREAM_END_TOKEN {
+        // Parse an explicit document.
+        var version_directive *yaml_version_directive_t
+        var tag_directives []yaml_tag_directive_t
+        start_mark := token.start_mark
+        if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+            return false
+        }
+        token = peek_token(parser)
+        if token == nil {
+            return false
+        }
+        if token.typ != yaml_DOCUMENT_START_TOKEN {
+            yaml_parser_set_parser_error(parser,
+                "did not find expected <document start>", token.start_mark)
+            return false
+        }
+        parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+        parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+        end_mark := token.end_mark
+
+        *event = yaml_event_t{
+            typ:               yaml_DOCUMENT_START_EVENT,
+            start_mark:        start_mark,
+            end_mark:          end_mark,
+            version_directive: version_directive,
+            tag_directives:    tag_directives,
+            implicit:          false,
+        }
+        skip_token(parser)
+
+    } else {
+        // Parse the stream end.
+        parser.state = yaml_PARSE_END_STATE
+        *event = yaml_event_t{
+            typ:        yaml_STREAM_END_EVENT,
+            start_mark: token.start_mark,
+            end_mark:   token.end_mark,
+        }
+        skip_token(parser)
+    }
+
+    return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
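+//
+// An empty scalar stands in for a node that is syntactically absent, such as
+// the missing value in a block mapping entry written as just "key:" or in a
+// flow mapping entry like {key}.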
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go new file mode 100644 index 000000000..7c1f5fac3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
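+//
+// The UTF-16 byte order marks are two bytes long and the UTF-8 one is three,
+// so the encoding detection below never needs more than three raw bytes to
+// decide.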
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
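Editorial aside, not part of the vendored file: the BOM check performed by yaml_parser_determine_encoding above can be summarized by this standalone sketch. sniffEncoding is an illustrative name, not a library function.

package main

import (
	"bytes"
	"fmt"
)

func sniffEncoding(raw []byte) string {
	switch {
	case bytes.HasPrefix(raw, []byte("\xff\xfe")):
		return "UTF-16LE"
	case bytes.HasPrefix(raw, []byte("\xfe\xff")):
		return "UTF-16BE"
	case bytes.HasPrefix(raw, []byte("\xef\xbb\xbf")):
		return "UTF-8 (with BOM)"
	default:
		return "UTF-8 (assumed, no BOM)"
	}
}

func main() {
	fmt.Println(sniffEncoding([]byte("\xef\xbb\xbfkey: value")))
	fmt.Println(sniffEncoding([]byte("key: value")))
}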
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
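Editorial aside, not part of the vendored file: the leading-octet masks in the UTF-8 branch above determine the sequence width. A standalone sketch, cross-checked against the standard library's unicode/utf8; widthFromLeadingOctet is an illustrative name.

package main

import (
	"fmt"
	"unicode/utf8"
)

// widthFromLeadingOctet mirrors the mask checks used by the decoder above.
func widthFromLeadingOctet(octet byte) int {
	switch {
	case octet&0x80 == 0x00:
		return 1 // 0xxxxxxx
	case octet&0xE0 == 0xC0:
		return 2 // 110xxxxx
	case octet&0xF0 == 0xE0:
		return 3 // 1110xxxx
	case octet&0xF8 == 0xF0:
		return 4 // 11110xxx
	default:
		return 0 // invalid leading octet
	}
}

func main() {
	for _, s := range []string{"A", "é", "€", "😀"} {
		r, size := utf8.DecodeRuneInString(s)
		fmt.Printf("%q: leading octet 0x%02X, width %d (utf8 says %d), rune U+%04X\n",
			s, s[0], widthFromLeadingOctet(s[0]), size, r)
	}
}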
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
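Editorial aside, not part of the vendored file: the surrogate-pair arithmetic above can be checked against unicode/utf16. A standalone sketch using the pair D83D DE00, which encodes U+1F600; combineSurrogates is an illustrative name.

package main

import (
	"fmt"
	"unicode/utf16"
)

// combineSurrogates applies the formula from the comment above:
// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
func combineSurrogates(high, low rune) rune {
	return 0x10000 + ((high & 0x3FF) << 10) + (low & 0x3FF)
}

func main() {
	r := combineSurrogates(0xD83D, 0xDE00)
	fmt.Printf("U+%X == U+1F600: %v\n", r, r == utf16.DecodeRune(0xD83D, 0xDE00))
}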
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go new file mode 100644 index 000000000..0f6438342 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/resolve.go @@ -0,0 +1,288 @@ +package yaml + +import ( + "encoding/base64" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" +) + +type resolveMapItem struct { + value cty.Value + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v cty.Value + tag string + l []string + }{ + {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) { + if !resolvableTag(tag) { + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if src != "" { + hint = resolveTable[src[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE { + return cty.StringVal(src), nil + } + + // Handle things we can lookup in a map. + if item, ok := resolveMap[src]; ok { + return item.value, nil + } + + if tag == "" { + for _, nan := range []string{".nan", ".NaN", ".NAN"} { + if src == nan { + // cty cannot represent NaN, so this is an error + return cty.NilVal, fmt.Errorf("floating point NaN is not supported") + } + } + } + + // Base 60 floats are intentionally not supported. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + if numberVal, err := cty.ParseNumberVal(src); err == nil { + return numberVal, nil + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. 
+ return cty.StringVal(t.Format(time.RFC3339)), nil + } + } + + plain := strings.Replace(src, "_", "", -1) + if numberVal, err := cty.ParseNumberVal(plain); err == nil { + return numberVal, nil + } + if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") { + tag = yaml_INT_TAG // will handle parsing below in our tag switch + } + default: + panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src)) + } + } + + if tag == "" && src == "<<" { + return mergeMappingVal, nil + } + + switch tag { + case yaml_STR_TAG, yaml_BINARY_TAG: + // If it's binary then we want to keep the base64 representation, because + // cty has no binary type, but we will check that it's actually base64. + if tag == yaml_BINARY_TAG { + _, err := base64.StdEncoding.DecodeString(src) + if err != nil { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag) + } + } + return cty.StringVal(src), nil + case yaml_BOOL_TAG: + item, ok := resolveMap[src] + if !ok || item.tag != yaml_BOOL_TAG { + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + } + return item.value, nil + case yaml_FLOAT_TAG, yaml_INT_TAG: + // Note: We don't actually check that a value tagged INT is a whole + // number here. We could, but cty generally doesn't care about the + // int/float distinction, so we'll just be generous and accept it. + plain := strings.Replace(src, "_", "", -1) + if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats + return numberVal, nil + } + if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberIntVal(intv), nil + } + if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes + return cty.NumberUIntVal(uintv), nil + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + return cty.NumberIntVal(intv), nil + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return cty.NumberUIntVal(uintv), nil + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + return cty.NumberIntVal(intv), nil + } + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_TIMESTAMP_TAG: + t, ok := parseTimestamp(src) + if ok { + // cty has no timestamp type, but its functions stdlib + // conventionally uses strings in an RFC3339 encoding + // to represent time, so we'll follow that convention here. + return cty.StringVal(t.Format(time.RFC3339)), nil + } + return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag) + case yaml_NULL_TAG: + return cty.NullVal(cty.DynamicPseudoType), nil + case "": + return cty.StringVal(src), nil + default: + return cty.NilVal, fmt.Errorf("unsupported tag %q", tag) + } +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. 
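Editorial aside on resolveScalar above, not part of the vendored file: underscores are stripped, prefixed integer forms are parsed with strconv, and timestamps are kept as RFC3339 strings because cty has no timestamp type. A simplified standalone sketch under those assumptions; the library itself goes through cty.ParseNumberVal and a wider set of timestamp layouts, and resolveNumberish here is an illustrative helper.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func resolveNumberish(src string) (string, bool) {
	plain := strings.ReplaceAll(src, "_", "") // YAML 1.1 allows "1_000"
	// strconv with base 0 accepts decimal, 0x..., 0b... and leading-zero octal forms.
	if v, err := strconv.ParseInt(plain, 0, 64); err == nil {
		return fmt.Sprintf("int %d", v), true
	}
	if v, err := strconv.ParseFloat(plain, 64); err == nil {
		return fmt.Sprintf("float %g", v), true
	}
	// Dates are kept as RFC3339 strings, following the convention noted above.
	if t, err := time.Parse("2006-1-2", plain); err == nil {
		return "timestamp " + t.Format(time.RFC3339), true
	}
	return "", false
}

func main() {
	for _, s := range []string{"1_000", "0x1A", "0b1010", "6.28", "2001-12-14"} {
		out, ok := resolveNumberish(s)
		fmt.Println(s, "->", out, ok)
	}
}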
+func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} + +type mergeMapping struct{} + +var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{})) +var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{}) diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go new file mode 100644 index 000000000..077fd1dd2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go @@ -0,0 +1,2696 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. 
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
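Editorial aside, not part of the vendored file: read_line above recognizes four break forms (CR LF, a lone CR or LF, NEL, and LS/PS); the first three are folded into a single '\n' while LS and PS are copied through unchanged. A standalone sketch that only classifies the bytes; normalizeBreak is an illustrative name.

package main

import "fmt"

func normalizeBreak(b []byte) (consumed int, ok bool) {
	switch {
	case len(b) >= 2 && b[0] == '\r' && b[1] == '\n': // CR LF
		return 2, true
	case len(b) >= 1 && (b[0] == '\r' || b[0] == '\n'): // CR or LF
		return 1, true
	case len(b) >= 2 && b[0] == 0xC2 && b[1] == 0x85: // NEL (U+0085)
		return 2, true
	case len(b) >= 3 && b[0] == 0xE2 && b[1] == 0x80 && (b[2] == 0xA8 || b[2] == 0xA9): // LS/PS
		return 3, true
	}
	return 0, false
}

func main() {
	for _, s := range [][]byte{[]byte("\r\n"), []byte("\n"), []byte("\u0085"), []byte("\u2028")} {
		n, ok := normalizeBreak(s)
		fmt.Printf("% X -> consumed %d bytes, break=%v\n", s, n, ok)
	}
}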
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. 
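Editorial aside, not part of the vendored file: yaml_parser_stale_simple_keys above discards a saved key under exactly two conditions. A standalone sketch of that predicate; the mark type and the stale helper are illustrative, not the library's.

package main

import "fmt"

type mark struct{ index, line int }

// stale reports whether a potential simple key saved at keyMark can no longer
// become a key once the scanner has reached cur: a simple key must stay on a
// single line and span fewer than 1024 characters.
func stale(keyMark, cur mark) bool {
	return keyMark.line < cur.line || keyMark.index+1024 < cur.index
}

func main() {
	key := mark{index: 10, line: 3}
	fmt.Println(stale(key, mark{index: 500, line: 3}))  // false: same line, under 1024 chars
	fmt.Println(stale(key, mark{index: 500, line: 4}))  // true: the key spans a line break
	fmt.Println(stale(key, mark{index: 2000, line: 3})) // true: longer than 1024 characters
}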
+ parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
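Editorial aside, not part of the vendored file: the indents stack managed by yaml_parser_roll_indent and yaml_parser_unroll_indent above behaves like a simple push/pop, with one BLOCK-END per popped level. A standalone sketch under that reading; indenter is an illustrative type.

package main

import "fmt"

type indenter struct {
	indent  int
	indents []int
}

func (in *indenter) roll(column int) {
	if in.indent < column {
		in.indents = append(in.indents, in.indent)
		in.indent = column // here the scanner would insert BLOCK-SEQUENCE-START or BLOCK-MAPPING-START
	}
}

func (in *indenter) unroll(column int) (blockEnds int) {
	for in.indent > column {
		blockEnds++ // one BLOCK-END token per popped level
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
	}
	return blockEnds
}

func main() {
	in := &indenter{indent: -1}
	in.roll(0)                 // top-level block mapping
	in.roll(2)                 // nested block sequence
	fmt.Println(in.unroll(-1)) // 2: closing both levels emits two BLOCK-ENDs
}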
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. 
+ simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. 
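Editorial aside, not part of the vendored file: yaml_parser_fetch_value above implements the "simple key" trick described in the introduction: the key's SCALAR token is emitted first, and only when ':' arrives is a KEY token inserted back at the saved queue position (via yaml_insert_token using simple_key.token_number). A standalone sketch of that retroactive insertion; insertAt and the string tokens are illustrative. Keeping the pending tokens in a queue that supports insertion is what makes this single-pass approach possible.

package main

import "fmt"

func insertAt(tokens []string, i int, tok string) []string {
	tokens = append(tokens, "")
	copy(tokens[i+1:], tokens[i:])
	tokens[i] = tok
	return tokens
}

func main() {
	tokens := []string{"BLOCK-MAPPING-START", `SCALAR("a")`} // "a" was saved as a possible simple key
	// The ':' indicator arrives: insert KEY before the saved scalar position,
	// then append VALUE as usual.
	tokens = insertAt(tokens, 1, "KEY")
	tokens = append(tokens, "VALUE", `SCALAR("1")`)
	fmt.Println(tokens) // [BLOCK-MAPPING-START KEY SCALAR("a") VALUE SCALAR("1")]
}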
+ parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. 
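+ // For example, in "%YAML 1.1  # a comment\n" everything after "1.1",
+ // up to and including the line break, is consumed here.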
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. 
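+ // A version component of a %YAML directive may have at most
+ // max_number_length (2) digits.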
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
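+ // For "!foo!suffix" this collects "foo" after the leading '!' copied above;
+ // the trailing '!' is appended just below if it is present.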
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
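+ // Each escape is a '%' followed by two hex digits: "%21" yields the single
+ // octet 0x21 ('!'), while "%C3%A9" yields the two octets of the UTF-8
+ // encoding of 'é'.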
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
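+ // By now the block scalar header (e.g. "|", ">2" or "|+"), any trailing
+ // blanks and an optional comment have been consumed, so only a line break
+ // or the end of the stream may follow.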
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
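+ // While the indentation level is still undetermined (*indent == 0), leading
+ // spaces are consumed unconditionally so that max_indent can record the
+ // most indented of the leading lines.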
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
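+ // Single-character escapes map directly to a byte or UTF-8 sequence
+ // (e.g. '\n' -> 0x0A, '\N' -> U+0085), while '\x', '\u' and '\U' introduce
+ // 2-, 4- and 8-digit hexadecimal code points that are re-encoded as UTF-8
+ // further below.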
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
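+ // The first break after the content is kept apart in leading_break so that
+ // line folding can later turn it into a single space; any further breaks
+ // accumulate in trailing_breaks and are emitted literally.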
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go new file mode 100644 index 000000000..a2dde608c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go new file mode 100644 index 000000000..2c314cc16 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/yaml.go @@ -0,0 +1,215 @@ +// Package yaml can marshal and unmarshal cty values in YAML format. +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal reads the document found within the given source buffer +// and attempts to convert it into a value conforming to the given type +// constraint. +// +// This is an alias for Unmarshal on the predefined Converter in "Standard". +// +// An error is returned if the given source contains any YAML document +// delimiters. +func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) { + return Standard.Unmarshal(src, ty) +} + +// Marshal serializes the given value into a YAML document, using a fixed +// mapping from cty types to YAML constructs. +// +// This is an alias for Marshal on the predefined Converter in "Standard". 
+// +// Note that unlike the function of the same name in the cty JSON package, +// this does not take a type constraint and therefore the YAML serialization +// cannot preserve late-bound type information in the serialization to be +// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type +// constraint given to Unmarshal will be decoded as if the corresponding portion +// of the input were processed with ImpliedType to find a target type. +func Marshal(v cty.Value) ([]byte, error) { + return Standard.Marshal(v) +} + +// ImpliedType analyzes the given source code and returns a suitable type that +// it could be decoded into. +// +// For a converter that is using standard YAML rather than cty-specific custom +// tags, only a subset of cty types can be produced: strings, numbers, bools, +// tuple types, and object types. +// +// This is an alias for ImpliedType on the predefined Converter in "Standard". +func ImpliedType(src []byte) (cty.Type, error) { + return Standard.ImpliedType(src) +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
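+ // It is an index path through the embedded structs, outermost first
+ // (see how it is built up in getStructInfo below).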
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go new file mode 100644 index 000000000..e25cee563 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. 
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return 
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? 
+ flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/zclconf/go-cty/LICENSE b/vendor/github.com/zclconf/go-cty/LICENSE new file mode 100644 index 000000000..d6503b555 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go new file mode 100644 index 000000000..d273d1483 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -0,0 +1,89 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value supports no operations except equality, and +// equality is implemented by pointer identity of the encapsulated pointer. +// +// The given name is used as the new type's "friendly name". 
This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. +func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule . +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. +// +// Is IsCapsuleType to determine if this method is safe to call. +func (t Type) EncapsulatedType() reflect.Type { + impl, ok := t.typeImpl.(*capsuleType) + if !ok { + panic("not a capsule type") + } + return impl.GoType +} diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go new file mode 100644 index 000000000..ab3919b14 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/collection.go @@ -0,0 +1,34 @@ +package cty + +import ( + "errors" +) + +type collectionTypeImpl interface { + ElementType() Type +} + +// IsCollectionType returns true if the given type supports the operations +// that are defined for all collection types. +func (t Type) IsCollectionType() bool { + _, ok := t.typeImpl.(collectionTypeImpl) + return ok +} + +// ElementType returns the element type of the receiver if it is a collection +// type, or panics if it is not. Use IsCollectionType first to test whether +// this method will succeed. +func (t Type) ElementType() Type { + if ct, ok := t.typeImpl.(collectionTypeImpl); ok { + return ct.ElementType() + } + panic(errors.New("not a collection type")) +} + +// ElementCallback is a callback type used for iterating over elements of +// collections and attributes of objects. +// +// The types of key and value depend on what type is being iterated over. +// Return true to stop iterating after the current element, or false to +// continue iterating. +type ElementCallback func(key Value, val Value) (stop bool) diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go new file mode 100644 index 000000000..d84f6ac10 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go @@ -0,0 +1,165 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// compareTypes implements a preference order for unification. +// +// The result of this method is not useful for anything other than unification +// preferences, since it assumes that the caller will verify that any suggested +// conversion is actually possible and it is thus able to to make certain +// optimistic assumptions. +func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. 
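// (Editorial illustration, not part of the upstream file.) A negative result
// means the first type is the more general, preferred unification target. For
// example, compareTypes(cty.String, cty.Number) is negative because any number
// can be rendered as a string, while compareTypes(cty.DynamicPseudoType, cty.Bool)
// is positive because the dynamic placeholder always has the lowest preference.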
+ if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. + swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. 
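// (Editorial illustration, not part of the upstream file.) Comparing the object
// types {name: string} and {name: number} prefers the first, since string is the
// more general attribute type; if each type instead "won" on a different
// attribute, the result would be 0 and Unify would have to construct a fresh
// supertype itself.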
+ if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go new file mode 100644 index 000000000..f9aacb4ee --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -0,0 +1,143 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + return func(in cty.Value, path cty.Path) (cty.Value, error) { + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). 
+ return dynamicFixup(out) + + case in.IsPrimitiveType() && out.IsPrimitiveType(): + conv := primitiveConversionsSafe[in][out] + if conv != nil { + return conv + } + if unsafe { + return primitiveConversionsUnsafe[in][out] + } + return nil + + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsTupleType() && in.IsTupleType(): + return conversionTupleToTuple(in, out, unsafe) + + case out.IsListType() && (in.IsListType() || in.IsSetType()): + inEty := in.ElementType() + outEty := out.ElementType() + if inEty.Equals(outEty) { + // This indicates that we're converting from list to set with + // the same element type, so we don't need an element converter. + return conversionCollectionToList(outEty, nil) + } + + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToList(outEty, convEty) + + case out.IsSetType() && (in.IsListType() || in.IsSetType()): + if in.IsListType() && !unsafe { + // Conversion from list to map is unsafe because it will lose + // information: the ordering will not be preserved, and any + // duplicate elements will be conflated. + return nil + } + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if inEty.Equals(outEty) { + // This indicates that we're converting from set to list with + // the same element type, so we don't need an element converter. + return conversionCollectionToSet(outEty, nil) + } + + if convEty == nil { + return nil + } + return conversionCollectionToSet(outEty, convEty) + + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + + case out.IsListType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToList(in, outEty, unsafe) + + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + + case out.IsMapType() && in.IsObjectType(): + outEty := out.ElementType() + return conversionObjectToMap(in, outEty, unsafe) + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type). +func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 000000000..3039ba22e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,340 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) 
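// An editorial usage sketch (not part of the upstream file), showing how this
// path is reached from a caller's point of view when no element conversion is
// needed:
//
//	setVal := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
//	listVal, err := convert.Convert(setVal, cty.List(cty.String))
//	// listVal is a cty.List(cty.String) value holding the set's elements.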
+func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) +func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, path.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. 
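// Editorial usage sketch (not part of the upstream file): when the target
// element type is cty.DynamicPseudoType, the tuple's element types are first
// unified, so
//
//	tup := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
//	setVal, err := convert.Convert(tup, cty.Set(cty.DynamicPseudoType))
//
// produces a cty.Set(cty.String) value, because string is the common type that
// the number element can safely convert to.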
+func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.SetValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + path = append(path, nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.SetVal(elems), nil + } +} + +// conversionTupleToList returns a conversion that will take a value of the +// given tuple type and return a list of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + path = append(path, nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.ListVal(elems), nil + } +} + +// conversionObjectToMap returns a conversion that will take a value of the +// given object type and return a map of the given element type. 
+// +// Will panic if the given objectType isn't actually an object type. +func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion { + objectAtys := objectType.AttributeTypes() + + if len(objectAtys) == 0 { + // Empty object short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.MapValEmpty(mapEty), nil + } + } + + if mapEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + objectAtysList := make([]cty.Type, 0, len(objectAtys)) + for _, aty := range objectAtys { + objectAtysList = append(objectAtysList, aty) + } + mapEty, _ = unify(objectAtysList, unsafe) + if mapEty == cty.NilType { + return nil + } + } + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(objectAty, mapEty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems[name.AsString()] = val + } + + return cty.MapVal(elems), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go new file mode 100644 index 000000000..4d19cf6c5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// dynamicFixup deals with just-in-time conversions of values that were +// input-typed as cty.DynamicPseudoType during analysis, ensuring that +// we end up with the desired output type once the value is known, or +// failing with an error if that is not possible. +// +// This is in the spirit of the cty philosophy of optimistically assuming that +// DynamicPseudoType values will become the intended value eventually, and +// dealing with any inconsistencies during final evaluation. +func dynamicFixup(wantType cty.Type) conversion { + return func(in cty.Value, path cty.Path) (cty.Value, error) { + ret, err := Convert(in, wantType) + if err != nil { + // Re-wrap this error so that the returned path is relative + // to the caller's original value, rather than relative to our + // conversion value here. + return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. 
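// Editorial usage sketch for the two helpers in this file (not part of the
// upstream file): a conversion requested from a not-yet-known source type is
// deferred until a concrete value arrives, e.g.
//
//	conv := convert.GetConversionUnsafe(cty.DynamicPseudoType, cty.Number)
//	n, err := conv(cty.StringVal("12")) // dynamicFixup re-converts at call time
//
// where n ends up equivalent to cty.NumberIntVal(12).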
+func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go new file mode 100644 index 000000000..62dabb8d1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 000000000..e0dbf491e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,48 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 000000000..592980a70 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. +func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion { + inEtys := in.TupleElementTypes() + outEtys := out.TupleElementTypes() + + if len(inEtys) != len(outEtys) { + return nil // no conversion is possible + } + + elemConvs := make([]conversion, len(inEtys)) + + for i, outEty := range outEtys { + inEty := inEtys[i] + + if inEty.Equals(outEty) { + // No conversion needed, so we can leave this one nil. + continue + } + + elemConvs[i] = getConversion(inEty, outEty, unsafe) + if elemConvs[i] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. 
+ return nil + } + } + + // If we get here then a conversion is possible, using the element + // conversions given in elemConvs. + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elemVals := make([]cty.Value, len(elemConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, val := it.Element() + var err error + + *pathStep = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elemVals[i] = val + } + + return cty.TupleVal(elemVals), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go new file mode 100644 index 000000000..2037299ba --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go new file mode 100644 index 000000000..581304ecd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go @@ -0,0 +1,220 @@ +package convert + +import ( + "bytes" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +// MismatchMessage is a helper to return an English-language description of +// the differences between got and want, phrased as a reason why got does +// not conform to want. +// +// This function does not itself attempt conversion, and so it should generally +// be used only after a conversion has failed, to report the conversion failure +// to an English-speaking user. The result will be confusing got is actually +// conforming to or convertable to want. +// +// The shorthand helper function Convert uses this function internally to +// produce its error messages, so callers of that function do not need to +// also use MismatchMessage. +// +// This function is similar to Type.TestConformance, but it is tailored to +// describing conversion failures and so the messages it generates relate +// specifically to the conversion rules implemented in this package. +func MismatchMessage(got, want cty.Type) string { + switch { + + case got.IsObjectType() && want.IsObjectType(): + // If both types are object types then we may be able to say something + // about their respective attributes. 
+ return mismatchMessageObjects(got, want) + + case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to list failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all list elements must have the same type" + + case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to set failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all set elements must have the same type" + + case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from object to map failed then it's because we couldn't + // find a common type to convert all of the object attributes to. + return "all map elements must have the same type" + + case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType(): + return mismatchMessageCollectionsFromStructural(got, want) + + case got.IsCollectionType() && want.IsCollectionType(): + return mismatchMessageCollectionsFromCollections(got, want) + + default: + // If we have nothing better to say, we'll just state what was required. + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageObjects(got, want cty.Type) string { + // Per our conversion rules, "got" is allowed to be a superset of "want", + // and so we'll produce error messages here under that assumption. + gotAtys := got.AttributeTypes() + wantAtys := want.AttributeTypes() + + // If we find missing attributes then we'll report those in preference, + // but if not then we will report a maximum of one non-conforming + // attribute, just to keep our messages relatively terse. + // We'll also prefer to report a recursive type error from an _unsafe_ + // conversion over a safe one, because these are subjectively more + // "serious". + var missingAttrs []string + var unsafeMismatchAttr string + var safeMismatchAttr string + + for name, wantAty := range wantAtys { + gotAty, exists := gotAtys[name] + if !exists { + missingAttrs = append(missingAttrs, name) + continue + } + + // We'll now try to convert these attributes in isolation and + // see if we have a nested conversion error to report. + // We'll try an unsafe conversion first, and then fall back on + // safe if unsafe is possible. + + // If we already have an unsafe mismatch attr error then we won't bother + // hunting for another one. + if unsafeMismatchAttr != "" { + continue + } + if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil { + unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + + // If we already have a safe mismatch attr error then we won't bother + // hunting for another one. + if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. 
+ switch { + + case len(missingAttrs) != 0: + sort.Strings(missingAttrs) + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} + +func mismatchMessageCollectionsFromStructural(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsObjectType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll move on to checking + // individual elements. + wantEty := want.ElementType() + switch { + case got.IsTupleType(): + for i, gotEty := range got.TupleElementTypes() { + if gotEty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotEty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + case got.IsObjectType(): + for name, gotAty := range got.AttributeTypes() { + if gotAty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotAty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + default: + // Should not be possible to get here since we only call this function + // with got as structural types, but... + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageCollectionsFromCollections(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsMapType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll check the element types. 
+ gotEty := got.ElementType() + wantEty := want.ElementType() + noun := "element type" + switch { + case want.IsListType(): + noun = "list element type" + case want.IsSetType(): + noun = "set element type" + case want.IsMapType(): + noun = "map element type" + } + return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go new file mode 100644 index 000000000..af19bdc50 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "errors" + + "github.com/zclconf/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. +// +// The source type for a conversion is always the source type given to +// the function that returned the Conversion, but there is no way to recover +// that from a Conversion value itself. If a Conversion is given a value +// that is not of its expected type (with the exception of DynamicPseudoType, +// which is always supported) then the function may panic or produce undefined +// results. +type Conversion func(in cty.Value) (out cty.Value, err error) + +// GetConversion returns a Conversion between the given in and out Types if +// a safe one is available, or returns nil otherwise. +func GetConversion(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, false)) +} + +// GetConversionUnsafe returns a Conversion between the given in and out Types +// if either a safe or unsafe one is available, or returns nil otherwise. +func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, true)) +} + +// Convert returns the result of converting the given value to the given type +// if an safe or unsafe conversion is available, or returns an error if such a +// conversion is impossible. +// +// This is a convenience wrapper around calling GetConversionUnsafe and then +// immediately passing the given value to the resulting function. +func Convert(in cty.Value, want cty.Type) (cty.Value, error) { + if in.Type().Equals(want) { + return in, nil + } + + conv := GetConversionUnsafe(in.Type(), want) + if conv == nil { + return cty.NilVal, errors.New(MismatchMessage(in.Type(), want)) + } + return conv(in) +} + +// Unify attempts to find the most general type that can be converted from +// all of the given types. If this is possible, that type is returned along +// with a slice of necessary conversions for some of the given types. +// +// If no common supertype can be found, this function returns cty.NilType and +// a nil slice. +// +// If a common supertype *can* be found, the returned slice will always be +// non-nil and will contain a non-nil conversion for each given type that +// needs to be converted, with indices corresponding to the input slice. +// Any given type that does *not* need conversion (because it is already of +// the appropriate type) will have a nil Conversion. +// +// cty.DynamicPseudoType is, as usual, a special case. 
If the given type list +// contains a mixture of dynamic and non-dynamic types, the dynamic types are +// disregarded for type selection and a conversion is returned for them that +// will attempt a late conversion of the given value to the target type, +// failing with a conversion error if the eventual concrete type is not +// compatible. If *all* given types are DynamicPseudoType, or in the +// degenerate case of an empty slice of types, the returned type is itself +// cty.DynamicPseudoType and no conversions are attempted. +func Unify(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, false) +} + +// UnifyUnsafe is the same as Unify except that it may return unsafe +// conversions in situations where a safe conversion isn't also available. +func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go new file mode 100644 index 000000000..b7769106d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. +func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. + for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go new file mode 100644 index 000000000..53ebbfe08 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -0,0 +1,314 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. 
However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // If we fall out here, no unification is possible + return cty.NilType, nil +} + +func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given object types have the same set of attribute names + // and the corresponding types are all unifyable, then we construct that + // type. + // - If the given object types have different attribute names or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the attribute types together to produce a map type. 
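Roughly, those two outcomes look like this through the public Unify entry point; a sketch assuming the usual go-cty rules (number converts safely to string, and an object converts to a map when its attribute types allow it), with the results shown only as illustration:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/convert"
    )

    func main() {
        // Same attribute names: unifies to a single object type.
        a := cty.Object(map[string]cty.Type{"id": cty.String, "count": cty.Number})
        b := cty.Object(map[string]cty.Type{"id": cty.String, "count": cty.String})
        ty, convs := convert.Unify([]cty.Type{a, b})
        fmt.Println(ty.FriendlyName(), convs[0] != nil) // object type; a needs a conversion

        // Different attribute names: falls back to a map of a unified element type.
        c := cty.Object(map[string]cty.Type{"x": cty.String})
        d := cty.Object(map[string]cty.Type{"y": cty.String})
        ty2, _ := convert.Unify([]cty.Type{c, d})
        fmt.Println(ty2.FriendlyName()) // e.g. "map of string"
    }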
+ // + // Our unification behavior is intentionally stricter than our conversion + // behavior for subset object types because user intent is different with + // unification use-cases: it makes sense to allow {"foo":true} to convert + // to emptyobjectval, but unifying an object with an attribute with the + // empty object type should be an error because unifying to the empty + // object type would be suprising and useless. + + firstAttrs := types[0].AttributeTypes() + for _, ty := range types[1:] { + thisAttrs := ty.AttributeTypes() + if len(thisAttrs) != len(firstAttrs) { + // If number of attributes is different then there can be no + // object type in common. + return unifyObjectTypesToMap(types, unsafe) + } + for name := range thisAttrs { + if _, ok := firstAttrs[name]; !ok { + // If attribute names don't exactly match then there can be + // no object type in common. + return unifyObjectTypesToMap(types, unsafe) + } + } + } + + // If we get here then we've proven that all of the given object types + // have exactly the same set of attribute names, though the types may + // differ. + retAtys := make(map[string]cty.Type) + atysAcross := make([]cty.Type, len(types)) + for name := range firstAttrs { + for i, ty := range types { + atysAcross[i] = ty.AttributeType(name) + } + retAtys[name], _ = unify(atysAcross, unsafe) + if retAtys[name] == cty.NilType { + // Cannot unify this attribute alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Object(retAtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyObjectTypes, where we see if we can + // construct a map type that can accept all of the attribute types. + + var atys []cty.Type + for _, ty := range types { + for _, aty := range ty.AttributeTypes() { + atys = append(atys, aty) + } + } + + ety, _ := unify(atys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.Map(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + return cty.NilType, nil + } + } + return retTy, conversions +} + +func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given tuple types have the same sequence of element types + // and the corresponding types are all unifyable, then we construct that + // type. 
+ // - If the given tuple types have different element types or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the elements types together to produce a list type. + + firstEtys := types[0].TupleElementTypes() + for _, ty := range types[1:] { + thisEtys := ty.TupleElementTypes() + if len(thisEtys) != len(firstEtys) { + // If number of elements is different then there can be no + // tuple type in common. + return unifyTupleTypesToList(types, unsafe) + } + } + + // If we get here then we've proven that all of the given tuple types + // have the same number of elements, though the types may differ. + retEtys := make([]cty.Type, len(firstEtys)) + atysAcross := make([]cty.Type, len(types)) + for idx := range firstEtys { + for tyI, ty := range types { + atysAcross[tyI] = ty.TupleElementTypes()[idx] + } + retEtys[idx], _ = unify(atysAcross, unsafe) + if retEtys[idx] == cty.NilType { + // Cannot unify this element alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Tuple(retEtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyTupleTypesToList(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyTupleTypes, where we see if we can + // construct a list type that can accept all of the element types. + + var etys []cty.Type + for _, ty := range types { + for _, ety := range ty.TupleElementTypes() { + etys = append(etys, ety) + } + } + + ety, _ := unify(etys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.List(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + return retTy, conversions +} + +func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) { + conversions := make([]Conversion, len(types)) + for i := range conversions { + conversions[i] = func(cty.Value) (cty.Value, error) { + return cty.DynamicVal, nil + } + } + return cty.DynamicPseudoType, conversions +} diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go new file mode 100644 index 000000000..d31f0547b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. 
+// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. +package cty diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go new file mode 100644 index 000000000..0bf84c774 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -0,0 +1,191 @@ +package cty + +import ( + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
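That stable ordering is visible through the public Value.ElementIterator API; a small sketch, where the printed order is what the lexicographic key sort implies and the attribute names are arbitrary:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        obj := cty.ObjectVal(map[string]cty.Value{
            "zeta":  cty.NumberIntVal(3),
            "alpha": cty.NumberIntVal(1),
        })
        it := obj.ElementIterator()
        for it.Next() {
            k, v := it.Element()
            fmt.Println(k.AsString(), v.AsBigFloat()) // "alpha 1" then "zeta 3"
        }
    }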
+ atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go new file mode 100644 index 000000000..dd139f724 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/error.go @@ -0,0 +1,55 @@ +package cty + +import ( + "fmt" +) + +// PathError is a specialization of error that represents where in a +// potentially-deep data structure an error occured, using a Path. +type PathError struct { + error + Path Path +} + +func errorf(path Path, f string, args ...interface{}) error { + // We need to copy the Path because often our caller builds it by + // continually mutating the same underlying buffer. + sPath := make(Path, len(path)) + copy(sPath, path) + return PathError{ + error: fmt.Errorf(f, args...), + Path: sPath, + } +} + +// NewErrorf creates a new PathError for the current path by passing the +// given format and arguments to fmt.Errorf and then wrapping the result +// similarly to NewError. +func (p Path) NewErrorf(f string, args ...interface{}) error { + return errorf(p, f, args...) +} + +// NewError creates a new PathError for the current path, wrapping the given +// error. +func (p Path) NewError(err error) error { + // if we're being asked to wrap an existing PathError then our new + // PathError will be the concatenation of the two paths, ensuring + // that we still get a single flat PathError that's thus easier for + // callers to deal with. 
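For a sense of how these errors are built in practice, here is a minimal sketch using the Path builder methods from cty's path API (GetAttr and Index) together with NewErrorf; the path and message text are arbitrary:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
    )

    func main() {
        // Build a path pointing at .settings[2] and attach an error to it.
        p := cty.Path{}.GetAttr("settings").Index(cty.NumberIntVal(2))
        err := p.NewErrorf("unexpected null value")

        if pErr, ok := err.(cty.PathError); ok {
            fmt.Println(len(pErr.Path), pErr.Error()) // 2 unexpected null value
        }
    }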
+ perr, wrappingPath := err.(PathError) + pathLen := len(p) + if wrappingPath { + pathLen = pathLen + len(perr.Path) + } + + sPath := make(Path, pathLen) + copy(sPath, p) + if wrappingPath { + copy(sPath[len(p):], perr.Path) + } + + return PathError{ + error: err, + Path: sPath, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go new file mode 100644 index 000000000..bfd30157e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go @@ -0,0 +1,50 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Parameter represents a parameter to a function. +type Parameter struct { + // Name is an optional name for the argument. This package ignores this + // value, but callers may use it for documentation, etc. + Name string + + // A type that any argument for this parameter must conform to. + // cty.DynamicPseudoType can be used, either at top-level or nested + // in a parameterized type, to indicate that any type should be + // permitted, to allow the definition of type-generic functions. + Type cty.Type + + // If AllowNull is set then null values may be passed into this + // argument's slot in both the type-check function and the implementation + // function. If not set, such values are rejected by the built-in + // checking rules. + AllowNull bool + + // If AllowUnknown is set then unknown values may be passed into this + // argument's slot in the implementation function. If not set, any + // unknown values will cause the function to immediately return + // an unkonwn value without calling the implementation function, thus + // freeing the function implementer from dealing with this case. + AllowUnknown bool + + // If AllowDynamicType is set then DynamicVal may be passed into this + // argument's slot in the implementation function. If not set, any + // dynamic values will cause the function to immediately return + // DynamicVal value without calling the implementation function, thus + // freeing the function implementer from dealing with this case. + // + // Note that DynamicVal is also unknown, so in order to receive dynamic + // *values* it is also necessary to set AllowUnknown. + // + // However, it is valid to set AllowDynamicType without AllowUnknown, in + // which case a dynamic value may be passed to the type checking function + // but will not make it to the *implementation* function. Instead, an + // unknown value of the type returned by the type-check function will be + // returned. This is suggested for functions that have a static return + // type since it allows the return value to be typed even if the input + // values are not, thus improving the type-check accuracy of derived + // values. + AllowDynamicType bool +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go new file mode 100644 index 000000000..393b3110b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go @@ -0,0 +1,6 @@ +// Package function builds on the functionality of cty by modeling functions +// that operate on cty Values. +// +// Functions are, at their core, Go anonymous functions. However, this package +// wraps around them utility functions for parameter type checking, etc. 
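Concretely, wrapping a Go function looks roughly like the sketch below, using the Spec, Parameter, and New definitions that follow in this diff; the greet function itself is hypothetical:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function"
    )

    // greet is a hypothetical function: one string parameter, string result.
    var greet = function.New(&function.Spec{
        Params: []function.Parameter{
            {Name: "name", Type: cty.String},
        },
        Type: function.StaticReturnType(cty.String),
        Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
            return cty.StringVal("Hello, " + args[0].AsString() + "!"), nil
        },
    })

    func main() {
        v, err := greet.Call([]cty.Value{cty.StringVal("cty")})
        fmt.Println(v.AsString(), err) // Hello, cty! <nil>

        // A bool argument is rejected by the built-in checking rules,
        // so Impl never runs for this call.
        _, err = greet.Call([]cty.Value{cty.True})
        fmt.Println(err) // type-checking error for argument 0
    }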
+package function diff --git a/vendor/github.com/zclconf/go-cty/cty/function/error.go b/vendor/github.com/zclconf/go-cty/cty/function/error.go new file mode 100644 index 000000000..2b5677998 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/error.go @@ -0,0 +1,50 @@ +package function + +import ( + "fmt" + "runtime/debug" +) + +// ArgError represents an error with one of the arguments in a call. The +// attribute Index represents the zero-based index of the argument in question. +// +// Its error *may* be a cty.PathError, in which case the error actually +// pertains to a nested value within the data structure passed as the argument. +type ArgError struct { + error + Index int +} + +func NewArgErrorf(i int, f string, args ...interface{}) error { + return ArgError{ + error: fmt.Errorf(f, args...), + Index: i, + } +} + +func NewArgError(i int, err error) error { + return ArgError{ + error: err, + Index: i, + } +} + +// PanicError indicates that a panic occurred while executing either a +// function's type or implementation function. This is captured and wrapped +// into a normal error so that callers (expected to be language runtimes) +// are freed from having to deal with panics in buggy functions. +type PanicError struct { + Value interface{} + Stack []byte +} + +func errorForPanic(val interface{}) error { + return PanicError{ + Value: val, + Stack: debug.Stack(), + } +} + +func (e PanicError) Error() string { + return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go new file mode 100644 index 000000000..9e8bf3376 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -0,0 +1,291 @@ +package function + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// Function represents a function. This is the main type in this package. +type Function struct { + spec *Spec +} + +// Spec is the specification of a function, used to instantiate +// a new Function. +type Spec struct { + // Params is a description of the positional parameters for the function. + // The standard checking logic rejects any calls that do not provide + // arguments conforming to this definition, freeing the function + // implementer from dealing with such inconsistencies. + Params []Parameter + + // VarParam is an optional specification of additional "varargs" the + // function accepts. If this is non-nil then callers may provide an + // arbitrary number of additional arguments (after those matching with + // the fixed parameters in Params) that conform to the given specification, + // which will appear as additional values in the slices of values + // provided to the type and implementation functions. + VarParam *Parameter + + // Type is the TypeFunc that decides the return type of the function + // given its arguments, which may be Unknown. See the documentation + // of TypeFunc for more information. + // + // Use StaticReturnType if the function's return type does not vary + // depending on its arguments. + Type TypeFunc + + // Impl is the ImplFunc that implements the function's behavior. + // + // Functions are expected to behave as pure functions, and not create + // any visible side-effects. + // + // If a TypeFunc is also provided, the value returned from Impl *must* + // conform to the type it returns, or a call to the function will panic. 
+ Impl ImplFunc +} + +// New creates a new function with the given specification. +// +// After passing a Spec to this function, the caller must no longer read from +// or mutate it. +func New(spec *Spec) Function { + f := Function{ + spec: spec, + } + return f +} + +// TypeFunc is a callback type for determining the return type of a function +// given its arguments. +// +// Any of the values passed to this function may be unknown, even if the +// parameters are not configured to accept unknowns. +// +// If any of the given values are *not* unknown, the TypeFunc may use the +// values for pre-validation and for choosing the return type. For example, +// a hypothetical JSON-unmarshalling function could return +// cty.DynamicPseudoType if the given JSON string is unknown, but return +// a concrete type based on the JSON structure if the JSON string is already +// known. +type TypeFunc func(args []cty.Value) (cty.Type, error) + +// ImplFunc is a callback type for the main implementation of a function. +// +// "args" are the values for the arguments, and this slice will always be at +// least as long as the argument definition slice for the function. +// +// "retType" is the type returned from the Type callback, included as a +// convenience to avoid the need to re-compute the return type for generic +// functions whose return type is a function of the arguments. +type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) + +// StaticReturnType returns a TypeFunc that always returns the given type. +// +// This is provided as a convenience for defining a function whose return +// type does not depend on the argument types. +func StaticReturnType(ty cty.Type) TypeFunc { + return func([]cty.Value) (cty.Type, error) { + return ty, nil + } +} + +// ReturnType returns the return type of a function given a set of candidate +// argument types, or returns an error if the given types are unacceptable. +// +// If the caller already knows values for at least some of the arguments +// it can be better to call ReturnTypeForValues, since certain functions may +// determine their return types from their values and return DynamicVal if +// the values are unknown. +func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { + vals := make([]cty.Value, len(argTypes)) + for i, ty := range argTypes { + vals[i] = cty.UnknownVal(ty) + } + return f.ReturnTypeForValues(vals) +} + +// ReturnTypeForValues is similar to ReturnType but can be used if the caller +// already knows the values of some or all of the arguments, in which case +// the function may be able to determine a more definite result if its +// return type depends on the argument *values*. +// +// For any arguments whose values are not known, pass an Unknown value of +// the appropriate type. 
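A short sketch of that type-check-only usage with a hypothetical one-parameter function; both calls should report cty.String without running the implementation:

    package main

    import (
        "fmt"
        "strings"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function"
    )

    func main() {
        // upper is hypothetical: string -> string.
        upper := function.New(&function.Spec{
            Params: []function.Parameter{{Name: "s", Type: cty.String}},
            Type:   function.StaticReturnType(cty.String),
            Impl: func(args []cty.Value, _ cty.Type) (cty.Value, error) {
                return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
            },
        })

        // Type checking with only types (wrapped as unknowns internally).
        ty, err := upper.ReturnType([]cty.Type{cty.String})
        fmt.Println(ty.Equals(cty.String), err) // true <nil>

        // Or with values, some of which may be unknown.
        ty, err = upper.ReturnTypeForValues([]cty.Value{cty.UnknownVal(cty.String)})
        fmt.Println(ty.Equals(cty.String), err) // true <nil>
    }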
+func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { + var posArgs []cty.Value + var varArgs []cty.Value + + if f.spec.VarParam == nil { + if len(args) != len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (%d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args + varArgs = nil + } else { + if len(args) < len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (at least %d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args[0:len(f.spec.Params)] + varArgs = args[len(f.spec.Params):] + } + + for i, spec := range f.spec.Params { + val := posArgs[i] + + if val.IsNull() && !spec.AllowNull { + return cty.Type{}, NewArgErrorf(i, "argument must not be null") + } + + // AllowUnknown is ignored for type-checking, since we expect to be + // able to type check with unknown values. We *do* still need to deal + // with DynamicPseudoType here though, since the Type function might + // not be ready to deal with that. + + if val.Type() == cty.DynamicPseudoType { + if !spec.AllowDynamicType { + return cty.DynamicPseudoType, nil + } + } else if errs := val.Type().TestConformance(spec.Type); errs != nil { + // For now we'll just return the first error in the set, since + // we don't have a good way to return the whole list here. + // Would be good to do something better at some point... + return cty.Type{}, NewArgError(i, errs[0]) + } + } + + if varArgs != nil { + spec := f.spec.VarParam + for i, val := range varArgs { + realI := i + len(posArgs) + + if val.IsNull() && !spec.AllowNull { + return cty.Type{}, NewArgErrorf(realI, "argument must not be null") + } + + if val.Type() == cty.DynamicPseudoType { + if !spec.AllowDynamicType { + return cty.DynamicPseudoType, nil + } + } else if errs := val.Type().TestConformance(spec.Type); errs != nil { + // For now we'll just return the first error in the set, since + // we don't have a good way to return the whole list here. + // Would be good to do something better at some point... + return cty.Type{}, NewArgError(i, errs[0]) + } + } + } + + // Intercept any panics from the function and return them as normal errors, + // so a calling language runtime doesn't need to deal with panics. + defer func() { + if r := recover(); r != nil { + ty = cty.NilType + err = errorForPanic(r) + } + }() + + return f.spec.Type(args) +} + +// Call actually calls the function with the given arguments, which must +// conform to the function's parameter specification or an error will be +// returned. +func (f Function) Call(args []cty.Value) (val cty.Value, err error) { + expectedType, err := f.ReturnTypeForValues(args) + if err != nil { + return cty.NilVal, err + } + + // Type checking already dealt with most situations relating to our + // parameter specification, but we still need to deal with unknown + // values. + posArgs := args[:len(f.spec.Params)] + varArgs := args[len(f.spec.Params):] + + for i, spec := range f.spec.Params { + val := posArgs[i] + + if !val.IsKnown() && !spec.AllowUnknown { + return cty.UnknownVal(expectedType), nil + } + } + + if f.spec.VarParam != nil { + spec := f.spec.VarParam + for _, val := range varArgs { + if !val.IsKnown() && !spec.AllowUnknown { + return cty.UnknownVal(expectedType), nil + } + } + } + + var retVal cty.Value + { + // Intercept any panics from the function and return them as normal errors, + // so a calling language runtime doesn't need to deal with panics. 
+ defer func() { + if r := recover(); r != nil { + val = cty.NilVal + err = errorForPanic(r) + } + }() + + retVal, err = f.spec.Impl(args, expectedType) + if err != nil { + return cty.NilVal, err + } + } + + // Returned value must conform to what the Type function expected, to + // protect callers from having to deal with inconsistencies. + if errs := retVal.Type().TestConformance(expectedType); errs != nil { + panic(fmt.Errorf( + "returned value %#v does not conform to expected return type %#v: %s", + retVal, expectedType, errs[0], + )) + } + + return retVal, nil +} + +// ProxyFunc the type returned by the method Function.Proxy. +type ProxyFunc func(args ...cty.Value) (cty.Value, error) + +// Proxy returns a function that can be called with cty.Value arguments +// to run the function. This is provided as a convenience for when using +// a function directly within Go code. +func (f Function) Proxy() ProxyFunc { + return func(args ...cty.Value) (cty.Value, error) { + return f.Call(args) + } +} + +// Params returns information about the function's fixed positional parameters. +// This does not include information about any variadic arguments accepted; +// for that, call VarParam. +func (f Function) Params() []Parameter { + new := make([]Parameter, len(f.spec.Params)) + copy(new, f.spec.Params) + return new +} + +// VarParam returns information about the variadic arguments the function +// expects, or nil if the function is not variadic. +func (f Function) VarParam() *Parameter { + if f.spec.VarParam == nil { + return nil + } + + ret := *f.spec.VarParam + return &ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go new file mode 100644 index 000000000..a473d0ec3 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -0,0 +1,73 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var NotFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Not(), nil + }, +}) + +var AndFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].And(args[1]), nil + }, +}) + +var OrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Or(args[1]), nil + }, +}) + +// Not returns the logical complement of the given boolean value. +func Not(num cty.Value) (cty.Value, error) { + return NotFunc.Call([]cty.Value{num}) +} + +// And returns true if and only if both of the given boolean values are true. +func And(a, b cty.Value) (cty.Value, error) { + return AndFunc.Call([]cty.Value{a, b}) +} + +// Or returns true if either of the given boolean values are true. 
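A quick sketch of the three wrappers in use (Not and And are defined above; Or follows below); the argument values are arbitrary:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        v, _ := stdlib.And(cty.True, cty.False)
        fmt.Println(v.False()) // true

        v, _ = stdlib.Or(cty.True, cty.False)
        fmt.Println(v.True()) // true

        v, _ = stdlib.Not(cty.False)
        fmt.Println(v.True()) // true
    }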
+func Or(a, b cty.Value) (cty.Value, error) { + return OrFunc.Call([]cty.Value{a, b}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go new file mode 100644 index 000000000..a132e0cde --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -0,0 +1,112 @@ +package stdlib + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// Bytes is a capsule type that can be used with the binary functions to +// support applications that need to support raw buffers in addition to +// UTF-8 strings. +var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil))) + +// BytesVal creates a new Bytes value from the given buffer, which must be +// non-nil or this function will panic. +// +// Once a byte slice has been wrapped in a Bytes capsule, its underlying array +// must be considered immutable. +func BytesVal(buf []byte) cty.Value { + if buf == nil { + panic("can't make Bytes value from nil slice") + } + + return cty.CapsuleVal(Bytes, &buf) +} + +// BytesLen is a Function that returns the length of the buffer encapsulated +// in a Bytes value. +var BytesLenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + return cty.NumberIntVal(int64(len(*bufPtr))), nil + }, +}) + +// BytesSlice is a Function that returns a slice of the given Bytes value. +var BytesSliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(Bytes), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + + var offset, length int + + var err error + err = gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 || length < 0 { + return cty.NilVal, fmt.Errorf("offset and length must be non-negative") + } + + if offset > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d is greater than total buffer length %d", + offset, len(*bufPtr), + ) + } + + end := offset + length + + if end > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d + length %d is greater than total buffer length %d", + offset, length, len(*bufPtr), + ) + } + + return BytesVal((*bufPtr)[offset:end]), nil + }, +}) + +func BytesLen(buf cty.Value) (cty.Value, error) { + return BytesLenFunc.Call([]cty.Value{buf}) +} + +func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return BytesSliceFunc.Call([]cty.Value{buf, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go new file mode 100644 index 000000000..967ba03c8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -0,0 +1,140 @@ +package stdlib + +import ( + "fmt" + + 
"github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +var HasIndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Bool, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].HasIndex(args[1]), nil + }, +}) + +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + key := args[1] + keyTy := key.Type() + switch { + case collTy.IsTupleType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for tuple must be number") + } + if !key.IsKnown() { + return cty.DynamicPseudoType, nil + } + var idx int + err := gocty.FromCtyValue(key, &idx) + if err != nil { + return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err) + } + + etys := collTy.TupleElementTypes() + + if idx >= len(etys) || idx < 0 { + return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)) + } + + return etys[idx], nil + + case collTy.IsListType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for list must be number") + } + + return collTy.ElementType(), nil + + case collTy.IsMapType(): + if keyTy != cty.String && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for map must be string") + } + + return collTy.ElementType(), nil + + default: + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + has, err := HasIndex(args[0], args[1]) + if err != nil { + return cty.NilVal, err + } + if has.False() { // safe because collection and key are guaranteed known here + return cty.NilVal, fmt.Errorf("invalid index") + } + + return args[0].Index(args[1]), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Number, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Length(), nil + }, +}) + +// HasIndex determines whether the given collection can be indexed with the +// given key. 
+func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) { + return HasIndexFunc.Call([]cty.Value{collection, key}) +} + +// Index returns an element from the given collection using the given key, +// or returns an error if there is no element for the given key. +func Index(collection cty.Value, key cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{collection, key}) +} + +// Length returns the number of elements in the given collection. +func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go new file mode 100644 index 000000000..5070a5adf --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -0,0 +1,93 @@ +package stdlib + +import ( + "encoding/csv" + "fmt" + "io" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var CSVDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + headers, err := cr.Read() + if err == io.EOF { + return cty.DynamicPseudoType, fmt.Errorf("missing header line") + } + if err != nil { + return cty.DynamicPseudoType, err + } + + atys := make(map[string]cty.Type, len(headers)) + for _, name := range headers { + if _, exists := atys[name]; exists { + return cty.DynamicPseudoType, fmt.Errorf("duplicate column name %q", name) + } + atys[name] = cty.String + } + return cty.List(cty.Object(atys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + atys := ety.AttributeTypes() + str := args[0] + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + cr.FieldsPerRecord = len(atys) + + // Read the header row first, since that'll tell us which indices + // map to which attribute names. + headers, err := cr.Read() + if err != nil { + return cty.DynamicVal, err + } + + var rows []cty.Value + for { + cols, err := cr.Read() + if err == io.EOF { + break + } + if err != nil { + return cty.DynamicVal, err + } + + vals := make(map[string]cty.Value, len(cols)) + for i, str := range cols { + name := headers[i] + vals[name] = cty.StringVal(str) + } + rows = append(rows, cty.ObjectVal(vals)) + } + + if len(rows) == 0 { + return cty.ListValEmpty(ety), nil + } + return cty.ListVal(rows), nil + }, +}) + +// CSVDecode parses the given CSV (RFC 4180) string and, if it is valid, +// returns a list of objects representing the rows. +// +// The result is always a list of some object type. The first row of the +// input is used to determine the object attributes, and subsequent rows +// determine the values of those attributes. 
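A minimal sketch of decoding a small document with the wrapper defined just below; per the type rules above, the header row becomes the object attributes and every value is a string:

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        csv := cty.StringVal("name,size\nalpha,10\nbeta,20\n")

        rows, err := stdlib.CSVDecode(csv)
        if err != nil {
            panic(err)
        }

        it := rows.ElementIterator()
        for it.Next() {
            _, row := it.Element()
            fmt.Println(row.GetAttr("name").AsString(), row.GetAttr("size").AsString())
        }
        // alpha 10
        // beta 20
    }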
+func CSVDecode(str cty.Value) (cty.Value, error) { + return CSVDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go new file mode 100644 index 000000000..aa15b7bde --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -0,0 +1,385 @@ +package stdlib + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var FormatDateFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + { + Name: "time", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + formatStr := args[0].AsString() + timeStr := args[1].AsString() + t, err := parseTimestamp(timeStr) + if err != nil { + return cty.DynamicVal, function.NewArgError(1, err) + } + + var buf bytes.Buffer + sc := bufio.NewScanner(strings.NewReader(formatStr)) + sc.Split(splitDateFormat) + const esc = '\'' + for sc.Scan() { + tok := sc.Bytes() + + // The leading byte signals the token type + switch { + case tok[0] == esc: + if tok[len(tok)-1] != esc || len(tok) == 1 { + return cty.DynamicVal, function.NewArgErrorf(0, "unterminated literal '") + } + if len(tok) == 2 { + // Must be a single escaped quote, '' + buf.WriteByte(esc) + } else { + // The content (until a closing esc) is printed out verbatim + // except that we must un-double any double-esc escapes in + // the middle of the string. + raw := tok[1 : len(tok)-1] + for i := 0; i < len(raw); i++ { + buf.WriteByte(raw[i]) + if raw[i] == esc { + i++ // skip the escaped quote + } + } + } + + case startsDateFormatVerb(tok[0]): + switch tok[0] { + case 'Y': + y := t.Year() + switch len(tok) { + case 2: + fmt.Fprintf(&buf, "%02d", y%100) + case 4: + fmt.Fprintf(&buf, "%04d", y) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: year must either be \"YY\" or \"YYYY\"", tok) + } + case 'M': + m := t.Month() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + case 3: + buf.WriteString(m.String()[:3]) + case 4: + buf.WriteString(m.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: month must be \"M\", \"MM\", \"MMM\", or \"MMMM\"", tok) + } + case 'D': + d := t.Day() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", d) + case 2: + fmt.Fprintf(&buf, "%02d", d) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of month must either be \"D\" or \"DD\"", tok) + } + case 'E': + d := t.Weekday() + switch len(tok) { + case 3: + buf.WriteString(d.String()[:3]) + case 4: + buf.WriteString(d.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of week must either be \"EEE\" or \"EEEE\"", tok) + } + case 'h': + h := t.Hour() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 24-hour must either be \"h\" or \"hh\"", tok) + } + case 'H': + h := t.Hour() % 12 + if h == 0 { + h = 12 + } + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, 
"invalid date format verb %q: 12-hour must either be \"H\" or \"HH\"", tok) + } + case 'A', 'a': + if len(tok) != 2 { + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: must be \"%s%s\"", tok, tok[0:1], tok[0:1]) + } + upper := tok[0] == 'A' + switch t.Hour() / 12 { + case 0: + if upper { + buf.WriteString("AM") + } else { + buf.WriteString("am") + } + case 1: + if upper { + buf.WriteString("PM") + } else { + buf.WriteString("pm") + } + } + case 'm': + m := t.Minute() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: minute must either be \"m\" or \"mm\"", tok) + } + case 's': + s := t.Second() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", s) + case 2: + fmt.Fprintf(&buf, "%02d", s) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: second must either be \"s\" or \"ss\"", tok) + } + case 'Z': + // We'll just lean on Go's own formatter for this one, since + // the necessary information is unexported. + switch len(tok) { + case 1: + buf.WriteString(t.Format("Z07:00")) + case 3: + str := t.Format("-0700") + switch str { + case "+0000": + buf.WriteString("UTC") + default: + buf.WriteString(str) + } + case 4: + buf.WriteString(t.Format("-0700")) + case 5: + buf.WriteString(t.Format("-07:00")) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: timezone must be Z, ZZZZ, or ZZZZZ", tok) + } + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q", tok) + } + + default: + // Any other starting character indicates a literal sequence + buf.Write(tok) + } + } + + return cty.StringVal(buf.String()), nil + }, +}) + +// FormatDate reformats a timestamp given in RFC3339 syntax into another time +// syntax defined by a given format string. +// +// The format string uses letter mnemonics to represent portions of the +// timestamp, with repetition signifying length variants of each portion. +// Single quote characters ' can be used to quote sequences of literal letters +// that should not be interpreted as formatting mnemonics. +// +// The full set of supported mnemonic sequences is listed below: +// +// YY Year modulo 100 zero-padded to two digits, like "06". +// YYYY Four (or more) digit year, like "2006". +// M Month number, like "1" for January. +// MM Month number zero-padded to two digits, like "01". +// MMM English month name abbreviated to three letters, like "Jan". +// MMMM English month name unabbreviated, like "January". +// D Day of month number, like "2". +// DD Day of month number zero-padded to two digits, like "02". +// EEE English day of week name abbreviated to three letters, like "Mon". +// EEEE English day of week name unabbreviated, like "Monday". +// h 24-hour number, like "2". +// hh 24-hour number zero-padded to two digits, like "02". +// H 12-hour number, like "2". +// HH 12-hour number zero-padded to two digits, like "02". +// AA Hour AM/PM marker in uppercase, like "AM". +// aa Hour AM/PM marker in lowercase, like "am". +// m Minute within hour, like "5". +// mm Minute within hour zero-padded to two digits, like "05". +// s Second within minute, like "9". +// ss Second within minute zero-padded to two digits, like "09". +// ZZZZ Timezone offset with just sign and digit, like "-0800". +// ZZZZZ Timezone offset with colon separating hours and minutes, like "-08:00". 
+// Z Like ZZZZZ but with a special case "Z" for UTC. +// ZZZ Like ZZZZ but with a special case "UTC" for UTC. +// +// The format syntax is optimized mainly for generating machine-oriented +// timestamps rather than human-oriented timestamps; the English language +// portions of the output reflect the use of English names in a number of +// machine-readable date formatting standards. For presentation to humans, +// a locale-aware time formatter (not included in this package) is a better +// choice. +// +// The format syntax is not compatible with that of any other language, but +// is optimized so that patterns for common standard date formats can be +// recognized quickly even by a reader unfamiliar with the format syntax. +func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) { + return FormatDateFunc.Call([]cty.Value{format, timestamp}) +} + +func parseTimestamp(ts string) (time.Time, error) { + t, err := time.Parse(time.RFC3339, ts) + if err != nil { + switch err := err.(type) { + case *time.ParseError: + // If err is s time.ParseError then its string representation is not + // appropriate since it relies on details of Go's strange date format + // representation, which a caller of our functions is not expected + // to be familiar with. + // + // Therefore we do some light transformation to get a more suitable + // error that should make more sense to our callers. These are + // still not awesome error messages, but at least they refer to + // the timestamp portions by name rather than by Go's example + // values. + if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" { + // For some reason err.Message is populated with a ": " prefix + // by the time package. + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message) + } + var what string + switch err.LayoutElem { + case "2006": + what = "year" + case "01": + what = "month" + case "02": + what = "day of month" + case "15": + what = "hour" + case "04": + what = "minute" + case "05": + what = "second" + case "Z07:00": + what = "UTC offset" + case "T": + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'") + case ":", "-": + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem) + } + default: + // Should never get here, because time.RFC3339 includes only the + // above portions, but since that might change in future we'll + // be robust here. + what = "timestamp segment" + } + if err.ValueElem == "" { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what) + } else { + return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what) + } + } + return time.Time{}, err + } + return t, nil +} + +// splitDataFormat is a bufio.SplitFunc used to tokenize a date format. +func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) { + if len(data) == 0 { + return 0, nil, nil + } + + const esc = '\'' + + switch { + + case data[0] == esc: + // If we have another quote immediately after then this is a single + // escaped escape. 
+ if len(data) > 1 && data[1] == esc { + return 2, data[:2], nil + } + + // Beginning of quoted sequence, so we will seek forward until we find + // the closing quote, ignoring escaped quotes along the way. + for i := 1; i < len(data); i++ { + if data[i] == esc { + if (i + 1) == len(data) { + // We need at least one more byte to decide if this is an + // escape or a terminator. + return 0, nil, nil + } + if data[i+1] == esc { + i++ // doubled-up quotes are an escape sequence + continue + } + // We've found the closing quote + return i + 1, data[:i+1], nil + } + } + // If we fall out here then we need more bytes to find the end, + // unless we're already at the end with an unclosed quote. + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + + case startsDateFormatVerb(data[0]): + rep := data[0] + for i := 1; i < len(data); i++ { + if data[i] != rep { + return i, data[:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // We need more data to decide if we've found the end + return 0, nil, nil + + default: + for i := 1; i < len(data); i++ { + if data[i] == esc || startsDateFormatVerb(data[i]) { + return i, data[:i], nil + } + } + // We might not actually be at the end of a literal sequence, + // but that doesn't matter since we'll concat them back together + // anyway. + return len(data), data, nil + } +} + +func startsDateFormatVerb(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go new file mode 100644 index 000000000..cfb613e5a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go @@ -0,0 +1,13 @@ +// Package stdlib is a collection of cty functions that are expected to be +// generally useful, and are thus factored out into this shared library in +// the hope that cty-using applications will have consistent behavior when +// using these functions. +// +// See the parent package "function" for more information on the purpose +// and usage of cty functions. +// +// This package contains both Go functions, which provide convenient access +// to call the functions from Go code, and the Function objects themselves. +// The latter follow the naming scheme of appending "Func" to the end of +// the function name. 
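For example, the date formatter described earlier is available in both forms; a sketch whose output follows from the documented mnemonics (YYYY, MM, DD, with "-" passed through as a literal):

    package main

    import (
        "fmt"

        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function/stdlib"
    )

    func main() {
        format := cty.StringVal("YYYY-MM-DD")
        ts := cty.StringVal("2006-01-02T15:04:05Z")

        // Go-function form, convenient from Go code...
        v, _ := stdlib.FormatDate(format, ts)
        fmt.Println(v.AsString()) // 2006-01-02

        // ...and the Function object form, usable from a language runtime.
        v, _ = stdlib.FormatDateFunc.Call([]cty.Value{format, ts})
        fmt.Println(v.AsString()) // 2006-01-02
    }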
+package stdlib diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go new file mode 100644 index 000000000..664790b46 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -0,0 +1,501 @@ +package stdlib + +import ( + "bytes" + "fmt" + "math/big" + "strings" + + "github.com/apparentlymart/go-textseg/textseg" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +//go:generate ragel -Z format_fsm.rl +//go:generate gofmt -w format_fsm.go + +var FormatFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + for _, arg := range args[1:] { + if !arg.IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + return cty.UnknownVal(cty.String), nil + } + } + str, err := formatFSM(args[0].AsString(), args[1:]) + return cty.StringVal(str), err + }, +}) + +var FormatListFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + fmtVal := args[0] + args = args[1:] + + if len(args) == 0 { + // With no arguments, this function is equivalent to Format, but + // returning a single-element list result. + result, err := Format(fmtVal, args...) + return cty.ListVal([]cty.Value{result}), err + } + + fmtStr := fmtVal.AsString() + + // Each of our arguments will be dealt with either as an iterator + // or as a single value. Iterators are used for sequence-type values + // (lists, sets, tuples) while everything else is treated as a + // single value. The sequences we iterate over are required to be + // all the same length. + iterLen := -1 + lenChooser := -1 + iterators := make([]cty.ElementIterator, len(args)) + singleVals := make([]cty.Value, len(args)) + for i, arg := range args { + argTy := arg.Type() + switch { + case (argTy.IsListType() || argTy.IsSetType() || argTy.IsTupleType()) && !arg.IsNull(): + if !argTy.IsTupleType() && !arg.IsKnown() { + // We can't iterate this one at all yet then, so we can't + // yet produce a result. + return cty.UnknownVal(retType), nil + } + thisLen := arg.LengthInt() + if iterLen == -1 { + iterLen = thisLen + lenChooser = i + } else { + if thisLen != iterLen { + return cty.NullVal(cty.List(cty.String)), function.NewArgErrorf( + i+1, + "argument %d has length %d, which is inconsistent with argument %d of length %d", + i+1, thisLen, + lenChooser+1, iterLen, + ) + } + } + iterators[i] = arg.ElementIterator() + default: + singleVals[i] = arg + } + } + + if iterLen == 0 { + // If our sequences are all empty then our result must be empty. 
+ return cty.ListValEmpty(cty.String), nil + } + + if iterLen == -1 { + // If we didn't encounter any iterables at all then we're going + // to just do one iteration with items from singleVals. + iterLen = 1 + } + + ret := make([]cty.Value, 0, iterLen) + fmtArgs := make([]cty.Value, len(iterators)) + Results: + for iterIdx := 0; iterIdx < iterLen; iterIdx++ { + + // Construct our arguments for a single format call + for i := range fmtArgs { + switch { + case iterators[i] != nil: + iterator := iterators[i] + iterator.Next() + _, val := iterator.Element() + fmtArgs[i] = val + default: + fmtArgs[i] = singleVals[i] + } + + // If any of the arguments to this call would be unknown then + // this particular result is unknown, but we'll keep going + // to see if any other iterations can produce known values. + if !fmtArgs[i].IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + ret = append(ret, cty.UnknownVal(cty.String)) + continue Results + } + } + + str, err := formatFSM(fmtStr, fmtArgs) + if err != nil { + return cty.NullVal(cty.List(cty.String)), fmt.Errorf( + "error on format iteration %d: %s", iterIdx, err, + ) + } + + ret = append(ret, cty.StringVal(str)) + } + + return cty.ListVal(ret), nil + }, +}) + +// Format produces a string representation of zero or more values using a +// format string similar to the "printf" function in C. +// +// It supports the following "verbs": +// +// %% Literal percent sign, consuming no value +// %v A default formatting of the value based on type, as described below. +// %#v JSON serialization of the value +// %t Converts to boolean and then produces "true" or "false" +// %b Converts to number, requires integer, produces binary representation +// %d Converts to number, requires integer, produces decimal representation +// %o Converts to number, requires integer, produces octal representation +// %x Converts to number, requires integer, produces hexadecimal representation +// with lowercase letters +// %X Like %x but with uppercase letters +// %e Converts to number, produces scientific notation like -1.234456e+78 +// %E Like %e but with an uppercase "E" representing the exponent +// %f Converts to number, produces decimal representation with fractional +// part but no exponent, like 123.456 +// %g %e for large exponents or %f otherwise +// %G %E for large exponents or %f otherwise +// %s Converts to string and produces the string's characters +// %q Converts to string and produces JSON-quoted string representation, +// like %v. +// +// The default format selections made by %v are: +// +// string %s +// number %g +// bool %t +// other %#v +// +// Null values produce the literal keyword "null" for %v and %#v, and produce +// an error otherwise. +// +// Width is specified by an optional decimal number immediately preceding the +// verb letter. If absent, the width is whatever is necessary to represent the +// value. Precision is specified after the (optional) width by a period +// followed by a decimal number. If no period is present, a default precision +// is used. A period with no following number is invalid. +// For examples: +// +// %f default width, default precision +// %9f width 9, default precision +// %.2f default width, precision 2 +// %9.2f width 9, precision 2 +// +// Width and precision are measured in unicode characters (grapheme clusters). 
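+//
+// As an illustrative example (a sketch, not an exhaustive specification), the
+// following call should produce cty.StringVal("pi is approximately 3.14"):
+//
+//     s, err := Format(
+//         cty.StringVal("%s is approximately %.2f"),
+//         cty.StringVal("pi"),
+//         cty.NumberFloatVal(3.14159),
+//     )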
+// +// For most values, width is the minimum number of characters to output, +// padding the formatted form with spaces if necessary. +// +// For strings, precision limits the length of the input to be formatted (not +// the size of the output), truncating if necessary. +// +// For numbers, width sets the minimum width of the field and precision sets +// the number of places after the decimal, if appropriate, except that for +// %g/%G precision sets the total number of significant digits. +// +// The following additional symbols can be used immediately after the percent +// introducer as flags: +// +// (a space) leave a space where the sign would be if number is positive +// + Include a sign for a number even if it is positive (numeric only) +// - Pad with spaces on the left rather than the right +// 0 Pad with zeros rather than spaces. +// +// Flag characters are ignored for verbs that do not support them. +// +// By default, % sequences consume successive arguments starting with the first. +// Introducing a [n] sequence immediately before the verb letter, where n is a +// decimal integer, explicitly chooses a particular value argument by its +// one-based index. Subsequent calls without an explicit index will then +// proceed with n+1, n+2, etc. +// +// An error is produced if the format string calls for an impossible conversion +// or accesses more values than are given. An error is produced also for +// an unsupported format verb. +func Format(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatFunc.Call(args) +} + +// FormatList applies the same formatting behavior as Format, but accepts +// a mixture of list and non-list values as arguments. Any list arguments +// passed must have the same length, which dictates the length of the +// resulting list. +// +// Any non-list arguments are used repeatedly for each iteration over the +// list arguments. The list arguments are iterated in order by key, so +// corresponding items are formatted together. +func FormatList(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatListFunc.Call(args) +} + +type formatVerb struct { + Raw string + Offset int + + ArgNum int + Mode rune + + Zero bool + Sharp bool + Plus bool + Minus bool + Space bool + + HasPrec bool + Prec int + + HasWidth bool + Width int +} + +// formatAppend is called by formatFSM (generated by format_fsm.rl) for each +// formatting sequence that is encountered. +func formatAppend(verb *formatVerb, buf *bytes.Buffer, args []cty.Value) error { + argIdx := verb.ArgNum - 1 + if argIdx >= len(args) { + return fmt.Errorf( + "not enough arguments for %q at %d: need index %d but have %d total", + verb.Raw, verb.Offset, + verb.ArgNum, len(args), + ) + } + arg := args[argIdx] + + if verb.Mode != 'v' && arg.IsNull() { + return fmt.Errorf("unsupported value for %q at %d: null value cannot be formatted", verb.Raw, verb.Offset) + } + + // Normalize to make some things easier for downstream formatters + if !verb.HasWidth { + verb.Width = -1 + } + if !verb.HasPrec { + verb.Prec = -1 + } + + // For our first pass we'll ensure the verb is supported and then fan + // out to other functions based on what conversion is needed. 
+ switch verb.Mode { + + case 'v': + return formatAppendAsIs(verb, buf, arg) + + case 't': + return formatAppendBool(verb, buf, arg) + + case 'b', 'd', 'o', 'x', 'X', 'e', 'E', 'f', 'g', 'G': + return formatAppendNumber(verb, buf, arg) + + case 's', 'q': + return formatAppendString(verb, buf, arg) + + default: + return fmt.Errorf("unsupported format verb %q in %q at offset %d", verb.Mode, verb.Raw, verb.Offset) + } +} + +func formatAppendAsIs(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + + if !verb.Sharp && !arg.IsNull() { + // Unless the caller overrode it with the sharp flag, we'll try some + // specialized formats before we fall back on JSON. + switch arg.Type() { + case cty.String: + fmted := arg.AsString() + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + case cty.Number: + bf := arg.AsBigFloat() + fmted := bf.Text('g', -1) + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + } + } + + jb, err := json.Marshal(arg, arg.Type()) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + + return nil +} + +func formatAppendBool(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Bool) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + if arg.True() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + return nil +} + +func formatAppendNumber(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Number) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + switch verb.Mode { + case 'b', 'd', 'o', 'x', 'X': + return formatAppendInteger(verb, buf, arg) + default: + bf := arg.AsBigFloat() + + // For floats our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bf) + buf.WriteString(fmted) + return nil + } +} + +func formatAppendInteger(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + bf := arg.AsBigFloat() + bi, acc := bf.Int(nil) + if acc != big.Exact { + return fmt.Errorf("unsupported value for %q at %d: an integer is required", verb.Raw, verb.Offset) + } + + // For integers our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bi) + buf.WriteString(fmted) + return nil +} + +func formatAppendString(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.String) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + // We _cannot_ directly use the Go fmt.Sprintf implementation for strings + // because it measures widths and precisions in runes rather than grapheme + // clusters. 
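+	// For example, a precision of 1 applied to "e" followed by a combining
+	// acute accent (U+0301) should keep both code points, because together
+	// they form a single grapheme cluster, whereas a rune-based truncation
+	// would split them apart.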
+ + str := arg.AsString() + if verb.Prec > 0 { + strB := []byte(str) + pos := 0 + wanted := verb.Prec + for i := 0; i < wanted; i++ { + next := strB[pos:] + if len(next) == 0 { + // ran out of characters before we hit our max width + break + } + d, _, _ := textseg.ScanGraphemeClusters(strB[pos:], true) + pos += d + } + str = str[:pos] + } + + switch verb.Mode { + case 's': + fmted := formatPadWidth(verb, str) + buf.WriteString(fmted) + case 'q': + jb, err := json.Marshal(cty.StringVal(str), cty.String) + if err != nil { + // Should never happen, since we know this is a known, non-null string + panic(fmt.Errorf("failed to marshal %#v as JSON: %s", arg, err)) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + default: + // Should never happen because formatAppend should've already validated + panic(fmt.Errorf("invalid string formatting mode %q", verb.Mode)) + } + return nil +} + +func formatPadWidth(verb *formatVerb, fmted string) string { + if verb.Width < 0 { + return fmted + } + + // Safe to ignore errors because ScanGraphemeClusters cannot produce errors + givenLen, _ := textseg.TokenCount([]byte(fmted), textseg.ScanGraphemeClusters) + wantLen := verb.Width + if givenLen >= wantLen { + return fmted + } + + padLen := wantLen - givenLen + padChar := " " + if verb.Zero { + padChar = "0" + } + pads := strings.Repeat(padChar, padLen) + + if verb.Minus { + return fmted + pads + } + return pads + fmted +} + +// formatStripIndexSegment strips out any [nnn] segment present in a verb +// string so that we can pass it through to Go's fmt.Sprintf with a single +// argument. This is used in cases where we're just leaning on Go's formatter +// because it's a superset of ours. +func formatStripIndexSegment(rawVerb string) string { + // We assume the string has already been validated here, since we should + // only be using this function with strings that were accepted by our + // scanner in formatFSM. + start := strings.Index(rawVerb, "[") + end := strings.Index(rawVerb, "]") + if start == -1 || end == -1 { + return rawVerb + } + + return rawVerb[:start] + rawVerb[end+1:] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go new file mode 100644 index 000000000..32b1ac971 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go @@ -0,0 +1,374 @@ +// line 1 "format_fsm.rl" +// This file is generated from format_fsm.rl. DO NOT EDIT. 
+ +// line 5 "format_fsm.rl" + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// line 21 "format_fsm.go" +var _formatfsm_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 4, + 1, 5, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 14, + 1, 16, 1, 17, 1, 18, 2, 3, + 4, 2, 12, 10, 2, 12, 16, 2, + 12, 18, 2, 13, 14, 2, 15, 10, + 2, 15, 18, +} + +var _formatfsm_key_offsets []byte = []byte{ + 0, 0, 14, 27, 34, 36, 39, 43, + 51, +} + +var _formatfsm_trans_keys []byte = []byte{ + 32, 35, 37, 43, 45, 46, 48, 91, + 49, 57, 65, 90, 97, 122, 32, 35, + 43, 45, 46, 48, 91, 49, 57, 65, + 90, 97, 122, 91, 48, 57, 65, 90, + 97, 122, 49, 57, 93, 48, 57, 65, + 90, 97, 122, 46, 91, 48, 57, 65, + 90, 97, 122, 37, +} + +var _formatfsm_single_lengths []byte = []byte{ + 0, 8, 7, 1, 0, 1, 0, 2, + 1, +} + +var _formatfsm_range_lengths []byte = []byte{ + 0, 3, 3, 3, 1, 1, 2, 3, + 0, +} + +var _formatfsm_index_offsets []byte = []byte{ + 0, 0, 12, 23, 28, 30, 33, 36, + 42, +} + +var _formatfsm_indicies []byte = []byte{ + 1, 2, 3, 4, 5, 6, 7, 10, + 8, 9, 9, 0, 1, 2, 4, 5, + 6, 7, 10, 8, 9, 9, 0, 13, + 11, 12, 12, 0, 14, 0, 15, 14, + 0, 9, 9, 0, 16, 19, 17, 18, + 18, 0, 20, 3, +} + +var _formatfsm_trans_targs []byte = []byte{ + 0, 2, 2, 8, 2, 2, 3, 2, + 7, 8, 4, 3, 8, 4, 5, 6, + 3, 7, 8, 4, 1, +} + +var _formatfsm_trans_actions []byte = []byte{ + 7, 17, 9, 3, 15, 13, 25, 11, + 43, 29, 19, 27, 49, 46, 21, 0, + 37, 23, 40, 34, 1, +} + +var _formatfsm_eof_actions []byte = []byte{ + 0, 31, 31, 31, 31, 31, 31, 31, + 5, +} + +const formatfsm_start int = 8 +const formatfsm_first_final int = 8 +const formatfsm_error int = 0 + +const formatfsm_en_main int = 8 + +// line 20 "format_fsm.rl" + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + highestArgIdx := 0 // zero means "none", since arg numbers are 1-based + + // line 159 "format_fsm.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + // line 123 "format_fsm.go" + { + cs = formatfsm_start + } + + // line 128 "format_fsm.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _keys = int(_formatfsm_key_offsets[cs]) + _trans = int(_formatfsm_index_offsets[cs]) + + _klen = int(_formatfsm_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _formatfsm_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_formatfsm_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _formatfsm_trans_keys[_mid+1]: + _lower = _mid + 2 + 
default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_formatfsm_indicies[_trans]) + cs = int(_formatfsm_trans_targs[_trans]) + + if _formatfsm_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_formatfsm_trans_actions[_trans]) + _nacts = uint(_formatfsm_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _formatfsm_actions[_acts-1] { + case 0: + // line 31 "format_fsm.rl" + + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + + case 1: + // line 40 "format_fsm.rl" + + buf.WriteByte(data[p]) + + case 4: + // line 51 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + case 5: + // line 58 "format_fsm.rl" + + verb.Sharp = true + + case 6: + // line 61 "format_fsm.rl" + + verb.Zero = true + + case 7: + // line 64 "format_fsm.rl" + + verb.Minus = true + + case 8: + // line 67 "format_fsm.rl" + + verb.Plus = true + + case 9: + // line 70 "format_fsm.rl" + + verb.Space = true + + case 10: + // line 74 "format_fsm.rl" + + verb.ArgNum = 0 + + case 11: + // line 77 "format_fsm.rl" + + verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0') + + case 12: + // line 81 "format_fsm.rl" + + verb.HasWidth = true + + case 13: + // line 84 "format_fsm.rl" + + verb.Width = 0 + + case 14: + // line 87 "format_fsm.rl" + + verb.Width = (10 * verb.Width) + (int(data[p]) - '0') + + case 15: + // line 91 "format_fsm.rl" + + verb.HasPrec = true + + case 16: + // line 94 "format_fsm.rl" + + verb.Prec = 0 + + case 17: + // line 97 "format_fsm.rl" + + verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0') + + case 18: + // line 101 "format_fsm.rl" + + verb.Mode = rune(data[p]) + te = p + 1 + verb.Raw = data[ts:te] + verb.Offset = ts + + if verb.ArgNum > highestArgIdx { + highestArgIdx = verb.ArgNum + } + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + + // line 330 "format_fsm.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _formatfsm_eof_actions[cs] + __nacts := uint(_formatfsm_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _formatfsm_actions[__acts-1] { + case 2: + // line 44 "format_fsm.rl" + + case 3: + // line 47 "format_fsm.rl" + + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + + case 4: + // line 51 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + // line 369 "format_fsm.go" + } + } + } + + _out: + { + } + } + + // line 177 "format_fsm.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. 
+ if cs < formatfsm_first_final { + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + if highestArgIdx < len(a) { + // Extraneous args are an error, to more easily detect mistakes + firstBad := highestArgIdx + 1 + if highestArgIdx == 0 { + // Custom error message for this case + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; no verbs in format string") + } + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; only %d used by format string", highestArgIdx) + } + + return buf.String(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl new file mode 100644 index 000000000..3c642d9e1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl @@ -0,0 +1,198 @@ +// This file is generated from format_fsm.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_tokens.rl here, so edit away!) + machine formatfsm; +}%% + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +%%{ + write data; +}%% + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + highestArgIdx := 0 // zero means "none", since arg numbers are 1-based + + %%{ + + action begin { + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + } + + action emit { + buf.WriteByte(fc); + } + + action finish_ok { + } + + action finish_err { + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + } + + action err_char { + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + } + + action flag_sharp { + verb.Sharp = true + } + action flag_zero { + verb.Zero = true + } + action flag_minus { + verb.Minus = true + } + action flag_plus { + verb.Plus = true + } + action flag_space { + verb.Space = true + } + + action argidx_reset { + verb.ArgNum = 0 + } + action argidx_num { + verb.ArgNum = (10 * verb.ArgNum) + (int(fc) - '0') + } + + action has_width { + verb.HasWidth = true + } + action width_reset { + verb.Width = 0 + } + action width_num { + verb.Width = (10 * verb.Width) + (int(fc) - '0') + } + + action has_prec { + verb.HasPrec = true + } + action prec_reset { + verb.Prec = 0 + } + action prec_num { + verb.Prec = (10 * verb.Prec) + (int(fc) - '0') + } + + action mode { + verb.Mode = rune(fc) + te = p+1 + verb.Raw = data[ts:te] + verb.Offset = ts + + if verb.ArgNum > highestArgIdx { + highestArgIdx = verb.ArgNum + } + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + } + + # a number that isn't zero and doesn't have a leading zero + num = [1-9] [0-9]*; + + flags = ( + '0' @flag_zero | + '#' @flag_sharp | + '-' @flag_minus | + '+' @flag_plus | + ' ' @flag_space + )*; + + argidx = (( + '[' (num $argidx_num) ']' + ) >argidx_reset)?; + + width = ( + ( num $width_num ) >width_reset %has_width + )?; + + precision = ( + ('.' 
( digit* $prec_num )) >prec_reset %has_prec + )?; + + # We accept any letter here, but will be more picky in formatAppend + mode = ('a'..'z' | 'A'..'Z') @mode; + + fmt_verb = ( + '%' @begin + flags + width + precision + argidx + mode + ); + + main := ( + [^%] @emit | + '%%' @emit | + fmt_verb + )* @/finish_err %/finish_ok $!err_char; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + %%{ + write init; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. + if cs < formatfsm_first_final { + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + if highestArgIdx < len(a) { + // Extraneous args are an error, to more easily detect mistakes + firstBad := highestArgIdx+1 + if highestArgIdx == 0 { + // Custom error message for this case + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; no verbs in format string") + } + return buf.String(), function.NewArgErrorf(firstBad, "too many arguments; only %d used by format string", highestArgIdx) + } + + return buf.String(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go new file mode 100644 index 000000000..6b31f2661 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -0,0 +1,107 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var EqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]), nil + }, +}) + +var NotEqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]).Not(), nil + }, +}) + +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, fmt.Errorf("all arguments must have the same type") + } + return 
retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + + return convert.Convert(argVal, retType) + } + return cty.NilVal, fmt.Errorf("no non-null arguments") + }, +}) + +// Equal determines whether the two given values are equal, returning a +// bool value. +func Equal(a cty.Value, b cty.Value) (cty.Value, error) { + return EqualFunc.Call([]cty.Value{a, b}) +} + +// NotEqual is the opposite of Equal. +func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) { + return NotEqualFunc.Call([]cty.Value{a, b}) +} + +// Coalesce returns the first of the given arguments that is not null. If +// all arguments are null, an error is produced. +func Coalesce(vals ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(vals) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go new file mode 100644 index 000000000..07901c65d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -0,0 +1,72 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +var JSONEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + val := args[0] + if !val.IsWhollyKnown() { + // We can't serialize unknowns, so if the value is unknown or + // contains any _nested_ unknowns then our result must be + // unknown. + return cty.UnknownVal(retType), nil + } + + buf, err := json.Marshal(val, val.Type()) + if err != nil { + return cty.NilVal, err + } + + return cty.StringVal(string(buf)), nil + }, +}) + +var JSONDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + buf := []byte(str.AsString()) + return json.ImpliedType(buf) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + buf := []byte(args[0].AsString()) + return json.Unmarshal(buf, retType) + }, +}) + +// JSONEncode returns a JSON serialization of the given value. +func JSONEncode(val cty.Value) (cty.Value, error) { + return JSONEncodeFunc.Call([]cty.Value{val}) +} + +// JSONDecode parses the given JSON string and, if it is valid, returns the +// value it represents. +// +// Note that applying JSONDecode to the result of JSONEncode may not produce +// an identically-typed result, since JSON encoding is lossy for cty Types. +// The resulting value will consist only of primitive types, object types, and +// tuple types. 
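+//
+// As an illustrative example (a sketch only), decoding a small JSON object
+// should produce a cty object value:
+//
+//     // v should be equivalent to cty.ObjectVal(map[string]cty.Value{
+//     //     "enabled": cty.True,
+//     //     "count":   cty.NumberIntVal(2),
+//     // })
+//     v, err := JSONDecode(cty.StringVal(`{"enabled":true,"count":2}`))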
+func JSONDecode(str cty.Value) (cty.Value, error) { + return JSONDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go new file mode 100644 index 000000000..bd9b2e51b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -0,0 +1,428 @@ +package stdlib + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var AbsoluteFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Absolute(), nil + }, +}) + +var AddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Add(args[1]), nil + }, +}) + +var SubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Sub can panic if the input values are infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't subtract infinity from itself") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Subtract(args[1]), nil + }, +}) + +var MultiplyFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. 
+ defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't multiply zero by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Multiply(args[1]), nil + }, +}) + +var DivideFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Quo can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't divide zero by zero or infinity by infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Divide(args[1]), nil + }, +}) + +var ModuloFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't use modulo with zero and infinity") + } else { + // not a panic we recognize + panic(r) + } + } + }() + + return args[0].Modulo(args[1]), nil + }, +}) + +var GreaterThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThan(args[1]), nil + }, +}) + +var GreaterThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].GreaterThanOrEqualTo(args[1]), nil + }, +}) + +var LessThanFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThan(args[1]), nil + }, +}) + +var LessThanOrEqualToFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].LessThanOrEqualTo(args[1]), nil + }, +}) + +var NegateFunc = 
function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Negate(), nil + }, +}) + +var MinFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + min := cty.PositiveInfinity + for _, num := range args { + if num.LessThan(min).True() { + min = num + } + } + + return min, nil + }, +}) + +var MaxFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "numbers", + Type: cty.Number, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if len(args) == 0 { + return cty.NilVal, fmt.Errorf("must pass at least one number") + } + + max := cty.NegativeInfinity + for _, num := range args { + if num.GreaterThan(max).True() { + max = num + } + } + + return max, nil + }, +}) + +var IntFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bf := args[0].AsBigFloat() + if bf.IsInt() { + return args[0], nil + } + bi, _ := bf.Int(nil) + bf = (&big.Float{}).SetInt(bi) + return cty.NumberVal(bf), nil + }, +}) + +// Absolute returns the magnitude of the given number, without its sign. +// That is, it turns negative values into positive values. +func Absolute(num cty.Value) (cty.Value, error) { + return AbsoluteFunc.Call([]cty.Value{num}) +} + +// Add returns the sum of the two given numbers. +func Add(a cty.Value, b cty.Value) (cty.Value, error) { + return AddFunc.Call([]cty.Value{a, b}) +} + +// Subtract returns the difference between the two given numbers. +func Subtract(a cty.Value, b cty.Value) (cty.Value, error) { + return SubtractFunc.Call([]cty.Value{a, b}) +} + +// Multiply returns the product of the two given numbers. +func Multiply(a cty.Value, b cty.Value) (cty.Value, error) { + return MultiplyFunc.Call([]cty.Value{a, b}) +} + +// Divide returns a divided by b, where both a and b are numbers. +func Divide(a cty.Value, b cty.Value) (cty.Value, error) { + return DivideFunc.Call([]cty.Value{a, b}) +} + +// Negate returns the given number multipled by -1. +func Negate(num cty.Value) (cty.Value, error) { + return NegateFunc.Call([]cty.Value{num}) +} + +// LessThan returns true if a is less than b. +func LessThan(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanFunc.Call([]cty.Value{a, b}) +} + +// LessThanOrEqualTo returns true if a is less than b. +func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return LessThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// GreaterThan returns true if a is less than b. +func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanFunc.Call([]cty.Value{a, b}) +} + +// GreaterThanOrEqualTo returns true if a is less than b. 
+func GreaterThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// Modulo returns the remainder of a divided by b under integer division, +// where both a and b are numbers. +func Modulo(a cty.Value, b cty.Value) (cty.Value, error) { + return ModuloFunc.Call([]cty.Value{a, b}) +} + +// Min returns the minimum number from the given numbers. +func Min(numbers ...cty.Value) (cty.Value, error) { + return MinFunc.Call(numbers) +} + +// Max returns the maximum number from the given numbers. +func Max(numbers ...cty.Value) (cty.Value, error) { + return MaxFunc.Call(numbers) +} + +// Int removes the fractional component of the given number returning an +// integer representing the whole number component, rounding towards zero. +// For example, -1.5 becomes -1. +// +// If an infinity is passed to Int, an error is returned. +func Int(num cty.Value) (cty.Value, error) { + if num == cty.PositiveInfinity || num == cty.NegativeInfinity { + return cty.NilVal, fmt.Errorf("can't truncate infinity to an integer") + } + return IntFunc.Call([]cty.Value{num}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go new file mode 100644 index 000000000..2dd6348a2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/regexp.go @@ -0,0 +1,233 @@ +package stdlib + +import ( + "fmt" + "regexp" + resyntax "regexp/syntax" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var RegexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pattern", + Type: cty.String, + }, + { + Name: "string", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + // We can't predict our type without seeing our pattern + return cty.DynamicPseudoType, nil + } + + retTy, err := regexPatternResultType(args[0].AsString()) + if err != nil { + err = function.NewArgError(0, err) + } + return retTy, err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + if retType == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + re, err := regexp.Compile(args[0].AsString()) + if err != nil { + // Should never happen, since we checked this in the Type function above. + return cty.NilVal, function.NewArgErrorf(0, "error parsing pattern: %s", err) + } + str := args[1].AsString() + + captureIdxs := re.FindStringSubmatchIndex(str) + if captureIdxs == nil { + return cty.NilVal, fmt.Errorf("pattern did not match any part of the given string") + } + + return regexPatternResult(re, str, captureIdxs, retType), nil + }, +}) + +var RegexAllFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "pattern", + Type: cty.String, + }, + { + Name: "string", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + if !args[0].IsKnown() { + // We can't predict our type without seeing our pattern, + // but we do know it'll always be a list of something. 
+ return cty.List(cty.DynamicPseudoType), nil + } + + retTy, err := regexPatternResultType(args[0].AsString()) + if err != nil { + err = function.NewArgError(0, err) + } + return cty.List(retTy), err + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + if ety == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + re, err := regexp.Compile(args[0].AsString()) + if err != nil { + // Should never happen, since we checked this in the Type function above. + return cty.NilVal, function.NewArgErrorf(0, "error parsing pattern: %s", err) + } + str := args[1].AsString() + + captureIdxsEach := re.FindAllStringSubmatchIndex(str, -1) + if len(captureIdxsEach) == 0 { + return cty.ListValEmpty(ety), nil + } + + elems := make([]cty.Value, len(captureIdxsEach)) + for i, captureIdxs := range captureIdxsEach { + elems[i] = regexPatternResult(re, str, captureIdxs, ety) + } + return cty.ListVal(elems), nil + }, +}) + +// Regex is a function that extracts one or more substrings from a given +// string by applying a regular expression pattern, describing the first +// match. +// +// The return type depends on the composition of the capture groups (if any) +// in the pattern: +// +// - If there are no capture groups at all, the result is a single string +// representing the entire matched pattern. +// - If all of the capture groups are named, the result is an object whose +// keys are the named groups and whose values are their sub-matches, or +// null if a particular sub-group was inside another group that didn't +// match. +// - If none of the capture groups are named, the result is a tuple whose +// elements are the sub-groups in order and whose values are their +// sub-matches, or null if a particular sub-group was inside another group +// that didn't match. +// - It is invalid to use both named and un-named capture groups together in +// the same pattern. +// +// If the pattern doesn't match, this function returns an error. To test for +// a match, call RegexAll and check if the length of the result is greater +// than zero. +func Regex(pattern, str cty.Value) (cty.Value, error) { + return RegexFunc.Call([]cty.Value{pattern, str}) +} + +// RegexAll is similar to Regex but it finds all of the non-overlapping matches +// in the given string and returns a list of them. +// +// The result type is always a list, whose element type is deduced from the +// pattern in the same way as the return type for Regex is decided. +// +// If the pattern doesn't match at all, this function returns an empty list. +func RegexAll(pattern, str cty.Value) (cty.Value, error) { + return RegexAllFunc.Call([]cty.Value{pattern, str}) +} + +// regexPatternResultType parses the given regular expression pattern and +// returns the structural type that would be returned to represent its +// capture groups. +// +// Returns an error if parsing fails or if the pattern uses a mixture of +// named and unnamed capture groups, which is not permitted. +func regexPatternResultType(pattern string) (cty.Type, error) { + re, rawErr := regexp.Compile(pattern) + switch err := rawErr.(type) { + case *resyntax.Error: + return cty.NilType, fmt.Errorf("invalid regexp pattern: %s in %s", err.Code, err.Expr) + case error: + // Should never happen, since all regexp compile errors should + // be resyntax.Error, but just in case... 
+ return cty.NilType, fmt.Errorf("error parsing pattern: %s", err) + } + + allNames := re.SubexpNames()[1:] + var names []string + unnamed := 0 + for _, name := range allNames { + if name == "" { + unnamed++ + } else { + if names == nil { + names = make([]string, 0, len(allNames)) + } + names = append(names, name) + } + } + switch { + case unnamed == 0 && len(names) == 0: + // If there are no capture groups at all then we'll return just a + // single string for the whole match. + return cty.String, nil + case unnamed > 0 && len(names) > 0: + return cty.NilType, fmt.Errorf("invalid regexp pattern: cannot mix both named and unnamed capture groups") + case unnamed > 0: + // For unnamed captures, we return a tuple of them all in order. + etys := make([]cty.Type, unnamed) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + // For named captures, we return an object using the capture names + // as keys. + atys := make(map[string]cty.Type, len(names)) + for _, name := range names { + atys[name] = cty.String + } + return cty.Object(atys), nil + } +} + +func regexPatternResult(re *regexp.Regexp, str string, captureIdxs []int, retType cty.Type) cty.Value { + switch { + case retType == cty.String: + start, end := captureIdxs[0], captureIdxs[1] + return cty.StringVal(str[start:end]) + case retType.IsTupleType(): + captureIdxs = captureIdxs[2:] // index 0 is the whole pattern span, which we ignore by skipping one pair + vals := make([]cty.Value, len(captureIdxs)/2) + for i := range vals { + start, end := captureIdxs[i*2], captureIdxs[i*2+1] + if start < 0 || end < 0 { + vals[i] = cty.NullVal(cty.String) // Did not match anything because containing group didn't match + continue + } + vals[i] = cty.StringVal(str[start:end]) + } + return cty.TupleVal(vals) + case retType.IsObjectType(): + captureIdxs = captureIdxs[2:] // index 0 is the whole pattern span, which we ignore by skipping one pair + vals := make(map[string]cty.Value, len(captureIdxs)/2) + names := re.SubexpNames()[1:] + for i, name := range names { + start, end := captureIdxs[i*2], captureIdxs[i*2+1] + if start < 0 || end < 0 { + vals[name] = cty.NullVal(cty.String) // Did not match anything because containing group didn't match + continue + } + vals[name] = cty.StringVal(str[start:end]) + } + return cty.ObjectVal(vals) + default: + // Should never happen + panic(fmt.Sprintf("invalid return type %#v", retType)) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go new file mode 100644 index 000000000..d3cc341dd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -0,0 +1,218 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var ConcatFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "seqs", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, fmt.Errorf("at least one argument is required") + } + + if args[0].Type().IsListType() { + // Possibly we're going to return a list, if all of our other + // args are also lists and we can find a common element type. 
+ tys := make([]cty.Type, len(args)) + for i, val := range args { + ty := val.Type() + if !ty.IsListType() { + tys = nil + break + } + tys[i] = ty + } + + if tys != nil { + commonType, _ := convert.UnifyUnsafe(tys) + if commonType != cty.NilType { + return commonType, nil + } + } + } + + etys := make([]cty.Type, 0, len(args)) + for i, val := range args { + ety := val.Type() + switch { + case ety.IsTupleType(): + etys = append(etys, ety.TupleElementTypes()...) + case ety.IsListType(): + if !val.IsKnown() { + // We need to know the list to count its elements to + // build our tuple type, so any concat of an unknown + // list can't be typed yet. + return cty.DynamicPseudoType, nil + } + + l := val.LengthInt() + subEty := ety.ElementType() + for j := 0; j < l; j++ { + etys = append(etys, subEty) + } + default: + return cty.NilType, function.NewArgErrorf( + i, "all arguments must be lists or tuples; got %s", + ety.FriendlyName(), + ) + } + } + return cty.Tuple(etys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + switch { + case retType.IsListType(): + // If retType is a list type then we know that all of the + // given values will be lists and that they will either be of + // retType or of something we can convert to retType. + vals := make([]cty.Value, 0, len(args)) + for i, list := range args { + list, err = convert.Convert(list, retType) + if err != nil { + // Conversion might fail because we used UnifyUnsafe + // to choose our return type. + return cty.NilVal, function.NewArgError(i, err) + } + + it := list.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, v) + } + } + if len(vals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + return cty.ListVal(vals), nil + case retType.IsTupleType(): + // If retType is a tuple type then we could have a mixture of + // lists and tuples but we know they all have known values + // (because our params don't AllowUnknown) and we know that + // concatenating them all together will produce a tuple of + // retType because of the work we did in the Type function above. + vals := make([]cty.Value, 0, len(args)) + + for _, seq := range args { + // Both lists and tuples support ElementIterator, so this is easy. 
+ it := seq.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, v) + } + } + + return cty.TupleVal(vals), nil + default: + // should never happen if Type is working correctly above + panic("unsupported return type") + } + }, +}) + +var RangeFunc = function.New(&function.Spec{ + VarParam: &function.Parameter{ + Name: "params", + Type: cty.Number, + }, + Type: function.StaticReturnType(cty.List(cty.Number)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var start, end, step cty.Value + switch len(args) { + case 1: + if args[0].LessThan(cty.Zero).True() { + start, end, step = cty.Zero, args[0], cty.NumberIntVal(-1) + } else { + start, end, step = cty.Zero, args[0], cty.NumberIntVal(1) + } + case 2: + if args[1].LessThan(args[0]).True() { + start, end, step = args[0], args[1], cty.NumberIntVal(-1) + } else { + start, end, step = args[0], args[1], cty.NumberIntVal(1) + } + case 3: + start, end, step = args[0], args[1], args[2] + default: + return cty.NilVal, fmt.Errorf("must have one, two, or three arguments") + } + + var vals []cty.Value + + if step == cty.Zero { + return cty.NilVal, function.NewArgErrorf(2, "step must not be zero") + } + down := step.LessThan(cty.Zero).True() + + if down { + if end.GreaterThan(start).True() { + return cty.NilVal, function.NewArgErrorf(1, "end must be less than start when step is negative") + } + } else { + if end.LessThan(start).True() { + return cty.NilVal, function.NewArgErrorf(1, "end must be greater than start when step is positive") + } + } + + num := start + for { + if down { + if num.LessThanOrEqualTo(end).True() { + break + } + } else { + if num.GreaterThanOrEqualTo(end).True() { + break + } + } + if len(vals) >= 1024 { + // Artificial limit to prevent bad arguments from consuming huge amounts of memory + return cty.NilVal, fmt.Errorf("more than 1024 values were generated; either decrease the difference between start and end or use a smaller step") + } + vals = append(vals, num) + num = num.Add(step) + } + if len(vals) == 0 { + return cty.ListValEmpty(cty.Number), nil + } + return cty.ListVal(vals), nil + }, +}) + +// Concat takes one or more sequences (lists or tuples) and returns the single +// sequence that results from concatenating them together in order. +// +// If all of the given sequences are lists of the same element type then the +// result is a list of that type. Otherwise, the result is a of a tuple type +// constructed from the given sequence types. +func Concat(seqs ...cty.Value) (cty.Value, error) { + return ConcatFunc.Call(seqs) +} + +// Range creates a list of numbers by starting from the given starting value, +// then adding the given step value until the result is greater than or +// equal to the given stopping value. Each intermediate result becomes an +// element in the resulting list. +// +// When all three parameters are set, the order is (start, end, step). If +// only two parameters are set, they are the start and end respectively and +// step defaults to 1. If only one argument is set, it gives the end value +// with start defaulting to 0 and step defaulting to 1. +// +// Because the resulting list must be fully buffered in memory, there is an +// artificial cap of 1024 elements, after which this function will return +// an error to avoid consuming unbounded amounts of memory. The Range function +// is primarily intended for creating small lists of indices to iterate over, +// so there should be no reason to generate huge lists with it. 
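+//
+// As an illustrative example (a sketch only), the following call should
+// produce a list of the numbers 1, 3, 5, 7 and 9, since iteration stops
+// before the end value is reached:
+//
+//     r, err := Range(
+//         cty.NumberIntVal(1),  // start
+//         cty.NumberIntVal(10), // end (exclusive)
+//         cty.NumberIntVal(2),  // step
+//     )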
+func Range(params ...cty.Value) (cty.Value, error) { + return RangeFunc.Call(params) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go new file mode 100644 index 000000000..100078fdc --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go @@ -0,0 +1,195 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var SetHasElementFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "set", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + { + Name: "elem", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].HasElement(args[1]), nil + }, +}) + +var SetUnionFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "first_set", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + }, + VarParam: &function.Parameter{ + Name: "other_sets", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + Type: setOperationReturnType, + Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { + return s1.Union(s2) + }), +}) + +var SetIntersectionFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "first_set", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + }, + VarParam: &function.Parameter{ + Name: "other_sets", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + Type: setOperationReturnType, + Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { + return s1.Intersection(s2) + }), +}) + +var SetSubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + }, + Type: setOperationReturnType, + Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { + return s1.Subtract(s2) + }), +}) + +var SetSymmetricDifferenceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "first_set", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + }, + VarParam: &function.Parameter{ + Name: "other_sets", + Type: cty.Set(cty.DynamicPseudoType), + AllowDynamicType: true, + }, + Type: setOperationReturnType, + Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet { + return s1.Subtract(s2) + }), +}) + +// SetHasElement determines whether the given set contains the given value as an +// element. +func SetHasElement(set cty.Value, elem cty.Value) (cty.Value, error) { + return SetHasElementFunc.Call([]cty.Value{set, elem}) +} + +// SetUnion returns a new set containing all of the elements from the given +// sets, which must have element types that can all be converted to some +// common type using the standard type unification rules. If conversion +// is not possible, an error is returned. +// +// The union operation is performed after type conversion, which may result +// in some previously-distinct values being conflated. +// +// At least one set must be provided. 
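To make the set operations above concrete, here is a minimal sketch that exercises them through the exported wrappers in this file (errors are ignored for brevity):

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
    s1 := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
    s2 := cty.SetVal([]cty.Value{cty.StringVal("b"), cty.StringVal("c")})

    union, _ := stdlib.SetUnion(s1, s2)         // set of "a", "b", "c"
    common, _ := stdlib.SetIntersection(s1, s2) // set of "b"
    onlyS1, _ := stdlib.SetSubtract(s1, s2)     // set of "a"
    hasA, _ := stdlib.SetHasElement(s1, cty.StringVal("a"))

    fmt.Println(union.LengthInt(), common.LengthInt(), onlyS1.LengthInt(), hasA.True())
    // Prints: 3 1 1 true
}
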
+func SetUnion(sets ...cty.Value) (cty.Value, error) { + return SetUnionFunc.Call(sets) +} + +// Intersection returns a new set containing the elements that exist +// in all of the given sets, which must have element types that can all be +// converted to some common type using the standard type unification rules. +// If conversion is not possible, an error is returned. +// +// The intersection operation is performed after type conversion, which may +// result in some previously-distinct values being conflated. +// +// At least one set must be provided. +func SetIntersection(sets ...cty.Value) (cty.Value, error) { + return SetIntersectionFunc.Call(sets) +} + +// SetSubtract returns a new set containing the elements from the +// first set that are not present in the second set. The sets must have +// element types that can both be converted to some common type using the +// standard type unification rules. If conversion is not possible, an error +// is returned. +// +// The subtract operation is performed after type conversion, which may +// result in some previously-distinct values being conflated. +func SetSubtract(a, b cty.Value) (cty.Value, error) { + return SetSubtractFunc.Call([]cty.Value{a, b}) +} + +// SetSymmetricDifference returns a new set containing elements that appear +// in any of the given sets but not multiple. The sets must have +// element types that can all be converted to some common type using the +// standard type unification rules. If conversion is not possible, an error +// is returned. +// +// The difference operation is performed after type conversion, which may +// result in some previously-distinct values being conflated. +func SetSymmetricDifference(sets ...cty.Value) (cty.Value, error) { + return SetSymmetricDifferenceFunc.Call(sets) +} + +func setOperationReturnType(args []cty.Value) (ret cty.Type, err error) { + var etys []cty.Type + for _, arg := range args { + etys = append(etys, arg.Type().ElementType()) + } + newEty, _ := convert.UnifyUnsafe(etys) + if newEty == cty.NilType { + return cty.NilType, fmt.Errorf("given sets must all have compatible element types") + } + return cty.Set(newEty), nil +} + +func setOperationImpl(f func(s1, s2 cty.ValueSet) cty.ValueSet) function.ImplFunc { + return func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + first := args[0] + first, err = convert.Convert(first, retType) + if err != nil { + return cty.NilVal, function.NewArgError(0, err) + } + + set := first.AsValueSet() + for i, arg := range args[1:] { + arg, err := convert.Convert(arg, retType) + if err != nil { + return cty.NilVal, function.NewArgError(i+1, err) + } + + argSet := arg.AsValueSet() + set = f(set, argSet) + } + return cty.SetValFromValueSet(set), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go new file mode 100644 index 000000000..d7c89fa82 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -0,0 +1,234 @@ +package stdlib + +import ( + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "github.com/apparentlymart/go-textseg/textseg" +) + +var UpperFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := 
args[0].AsString() + out := strings.ToUpper(in) + return cty.StringVal(out), nil + }, +}) + +var LowerFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToLower(in) + return cty.StringVal(out), nil + }, +}) + +var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + out := make([]byte, len(in)) + pos := len(out) + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + cluster := in[i : i+d] + pos -= len(cluster) + copy(out[pos:], cluster) + i += d + } + + return cty.StringVal(string(out)), nil + }, +}) + +var StrlenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + l := 0 + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + + return cty.NumberIntVal(int64(l)), nil + }, +}) + +var SubstrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + var offset, length int + + var err error + err = gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 { + totalLenNum, err := Strlen(args[0]) + if err != nil { + // should never happen + panic("Stdlen returned an error") + } + + var totalLen int + err = gocty.FromCtyValue(totalLenNum, &totalLen) + if err != nil { + // should never happen + panic("Stdlen returned a non-int number") + } + + offset += totalLen + } + + sub := in + pos := 0 + var i int + + // First we'll seek forward to our offset + if offset > 0 { + for i = 0; i < len(sub); { + d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true) + i += d + pos++ + if pos == offset { + break + } + if i >= len(in) { + return cty.StringVal(""), nil + } + } + + sub = sub[i:] + } + + if length < 0 { + // Taking the remainder of the string is a fast path since + // we can just return the rest of the buffer verbatim. + return cty.StringVal(string(sub)), nil + } + + // Otherwise we need to start seeking forward again until we + // reach the length we want. + pos = 0 + for i = 0; i < len(sub); { + d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true) + i += d + pos++ + if pos == length { + break + } + } + + sub = sub[:i] + + return cty.StringVal(string(sub)), nil + }, +}) + +// Upper is a Function that converts a given string to uppercase. 
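These functions operate on grapheme clusters rather than bytes, as the textseg calls above show. A short sketch using the exported wrappers defined below; the accented character counts as one character even though it occupies more than one byte:

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
    s := cty.StringVal("héllo")

    upper, _ := stdlib.Upper(s)   // cty.StringVal("HÉLLO")
    length, _ := stdlib.Strlen(s) // 5 characters, even though the UTF-8 encoding is 6 bytes

    // A negative length asks for the remainder of the string after the offset.
    rest, _ := stdlib.Substr(s, cty.NumberIntVal(1), cty.NumberIntVal(-1)) // "éllo"

    fmt.Println(upper.AsString(), length.AsBigFloat(), rest.AsString())
    // Prints: HÉLLO 5 éllo
}
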
+func Upper(str cty.Value) (cty.Value, error) { + return UpperFunc.Call([]cty.Value{str}) +} + +// Lower is a Function that converts a given string to lowercase. +func Lower(str cty.Value) (cty.Value, error) { + return LowerFunc.Call([]cty.Value{str}) +} + +// Reverse is a Function that reverses the order of the characters in the +// given string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Reverse(str cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{str}) +} + +// Strlen is a Function that returns the length of the given string in +// characters. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +func Strlen(str cty.Value) (cty.Value, error) { + return StrlenFunc.Call([]cty.Value{str}) +} + +// Substr is a Function that extracts a sequence of characters from another +// string and creates a new string. +// +// As usual, "character" for the sake of this function is a grapheme cluster, +// so combining diacritics (for example) will be considered together as a +// single character. +// +// The "offset" index may be negative, in which case it is relative to the +// end of the given string. +// +// The "length" may be -1, in which case the remainder of the string after +// the given offset will be returned. +func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return SubstrFunc.Call([]cty.Value{str, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go new file mode 100644 index 000000000..3495550af --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go @@ -0,0 +1,31 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Unpredictable wraps a given function such that it retains the same arguments +// and type checking behavior but will return an unknown value when called. +// +// It is recommended that most functions be "pure", which is to say that they +// will always produce the same value given particular input. However, +// sometimes it is necessary to offer functions whose behavior depends on +// some external state, such as reading a file or determining the current time. +// In such cases, an unpredictable wrapper might be used to stand in for +// the function during some sort of prior "checking" phase in order to delay +// the actual effect until later. +// +// While Unpredictable can support a function that isn't pure in its +// implementation, it still expects a function to be pure in its type checking +// behavior, except for the special case of returning cty.DynamicPseudoType +// if it is not yet able to predict its return value based on current argument +// information. 
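For instance, wrapping one of the stdlib functions from this same vendor tree gives a function whose calls still type-check their arguments but always yield an unknown result (a minimal sketch):

package main

import (
    "fmt"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/function"
    "github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
    // Defer the real work: calls succeed but return an unknown string.
    deferred := function.Unpredictable(stdlib.UpperFunc)

    v, err := deferred.Call([]cty.Value{cty.StringVal("hello")})
    if err != nil {
        panic(err)
    }
    fmt.Println(v.IsKnown(), v.Type().Equals(cty.String)) // false true
}
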
+func Unpredictable(f Function) Function { + newSpec := *f.spec // shallow copy + newSpec.Impl = unpredictableImpl + return New(&newSpec) +} + +func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.UnknownVal(retType), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go new file mode 100644 index 000000000..a77dace27 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -0,0 +1,125 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // big.Float seems to, for some reason, lose its "pointerness" when we + // round-trip it, so we'll fix that here. + if bf, ok := gv.V.(big.Float); ok { + gv.V = &bf + } + + val.ty = gv.Ty + val.v = gv.V + + return nil +} + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Types to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent capsule types in gob. +func (t Type) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gt := gobType{ + Version: 0, + Impl: t.typeImpl, + } + + err := enc.Encode(gt) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Type: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementatino of the gob.GobDecoder interface, which +// reverses the encoding performed by GobEncode to allow types to be recovered +// from gob buffers. +func (t *Type) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gt gobType + err := dec.Decode(>) + if err != nil { + return fmt.Errorf("error decoding cty.Type: %s", err) + } + if gt.Version != 0 { + return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version) + } + + t.typeImpl = gt.Impl + + return nil +} + +// Capsule types cannot currently be gob-encoded, because they rely on pointer +// equality and we have no way to recover the original pointer on decode. 
+func (t *capsuleType) GobEncode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +func (t *capsuleType) GobDecode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +type gobValue struct { + Version int + Ty Type + V interface{} +} + +type gobType struct { + Version int + Impl typeImpl +} + +type gobCapsuleTypeImpl struct { +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go new file mode 100644 index 000000000..a5177d22b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go @@ -0,0 +1,7 @@ +// Package gocty deals with converting between cty Values and native go +// values. +// +// It operates under a similar principle to the encoding/json and +// encoding/xml packages in the standard library, using reflection to +// populate native Go data structures from cty values and vice-versa. +package gocty diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go new file mode 100644 index 000000000..94ffd2fb7 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go new file mode 100644 index 000000000..ca9de21d2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -0,0 +1,548 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. 
These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. +func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + 
switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go new file mode 100644 index 000000000..e9c2599e6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
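To make these decoding rules concrete, here is a minimal sketch of FromCtyValue populating a tagged Go struct; the Person type and its field names are illustrative only:

package main

import (
    "fmt"
    "log"

    "github.com/zclconf/go-cty/cty"
    "github.com/zclconf/go-cty/cty/gocty"
)

type Person struct {
    Name string `cty:"name"`
    Age  int    `cty:"age"`
}

func main() {
    val := cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("Ada"),
        "age":  cty.NumberIntVal(36),
    })

    var p Person
    if err := gocty.FromCtyValue(val, &p); err != nil {
        // Errors are phrased in cty terms, e.g. "number value is required".
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", p) // {Name:Ada Age:36}
}
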
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go new file mode 100644 index 000000000..ce4c8f1e9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
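A sketch of the intended round trip (the Network type and its tags are illustrative): derive the cty type once from the Go type, then use it to convert values:

package main

import (
    "fmt"
    "log"

    "github.com/zclconf/go-cty/cty/gocty"
)

type Network struct {
    Name  string   `cty:"name"`
    CIDRs []string `cty:"cidrs"`
}

func main() {
    n := Network{Name: "internal", CIDRs: []string{"10.0.0.0/8"}}

    // object({name = string, cidrs = list(string)})
    ty, err := gocty.ImpliedType(n)
    if err != nil {
        log.Fatal(err)
    }

    v, err := gocty.ToCtyValue(n, ty)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%#v\n", v)
}
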
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 000000000..1b88e9fa0 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
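To show how the helpers in this file are intended to compose, here is a hypothetical operation method written in the style of this package; the addSketch name and its placeholder result are invented for illustration:

// addSketch is illustrative only: it shows the usual calling pattern for
// mustTypeCheck and forceShortCircuitType, where dynamic or unknown operands
// short-circuit before any concrete arithmetic runs.
func (val Value) addSketch(other Value) Value {
    if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
        shortCircuit = forceShortCircuitType(shortCircuit, Number)
        return *shortCircuit
    }

    // At this point both operands are known, non-dynamic Number values, so a
    // real operation would compute and return its result here.
    return UnknownVal(Number) // placeholder for the sketch
}
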
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// shortCircuitForceType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgrade" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. +func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go new file mode 100644 index 000000000..c421a62ed --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality. 
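A round trip through encoding/json shows the representation: primitive types serialize as bare name strings and compound types as small JSON arrays (a minimal sketch):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/zclconf/go-cty/cty"
)

func main() {
    ty := cty.Object(map[string]cty.Type{
        "name":  cty.String,
        "ports": cty.List(cty.Number),
    })

    buf, err := json.Marshal(ty)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(buf)) // ["object",{"name":"string","ports":["list","number"]}]

    var got cty.Type
    if err := json.Unmarshal(buf, &got); err != nil {
        log.Fatal(err)
    }
    fmt.Println(got.Equals(ty)) // true
}
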
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/doc.go 
b/vendor/github.com/zclconf/go-cty/cty/json/doc.go new file mode 100644 index 000000000..8916513d6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go new file mode 100644 index 000000000..f7bea1a2f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go @@ -0,0 +1,189 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/zclconf/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
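A small sketch, not part of the vendored file, of how the public Marshal and Unmarshal entry points exercise this function, including the dynamic wrapping just described; the ctyjson alias and import paths are assumptions:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"count":   cty.NumberIntVal(3),
		"enabled": cty.True,
	})

	// With a concrete type the value serializes as a plain JSON object,
	// with attributes emitted in sorted order.
	concrete, err := ctyjson.Marshal(val, val.Type())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(concrete)) // {"count":3,"enabled":true}

	// With cty.DynamicPseudoType the value is wrapped together with its
	// type so that Unmarshal can recover it losslessly.
	dynamic, err := ctyjson.Marshal(val, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dynamic))
	// {"value":{"count":3,"enabled":true},"type":["object",{"count":"number","enabled":"bool"}]}

	back, err := ctyjson.Unmarshal(dynamic, cty.DynamicPseudoType)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.RawEquals(val)) // true
}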
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { + typeJSON, err := MarshalType(val.Type()) + if err != nil { + return path.NewErrorf("failed to serialize type: %s", err) + } + b.WriteString(`{"value":`) + marshal(val, val.Type(), path, b) + b.WriteString(`,"type":`) + b.Write(typeJSON) + b.WriteRune('}') + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/vendor/github.com/zclconf/go-cty/cty/json/simple.go new file mode 100644 index 000000000..507c9cc2c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/simple.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/zclconf/go-cty/cty" +) + +// SimpleJSONValue is a wrapper around cty.Value that adds implementations of +// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic +// encoding and decoding of values. +// +// The couplet Marshal and Unmarshal both take extra type information to +// inform the encoding and decoding process so that all of the cty types +// can be represented even though JSON's type system is a subset. +// +// SimpleJSONValue instead takes the approach of discarding the value's type +// information and then deriving a new type from the stored structure when +// decoding. This results in the same data being returned but not necessarily +// with exactly the same type. +// +// For information on how types are inferred when decoding, see the +// documentation of the function ImpliedType. +type SimpleJSONValue struct { + cty.Value +} + +// MarshalJSON is an implementation of json.Marshaler. See the documentation +// of SimpleJSONValue for more information. +func (v SimpleJSONValue) MarshalJSON() ([]byte, error) { + return Marshal(v.Value, v.Type()) +} + +// UnmarshalJSON is an implementation of json.Unmarshaler. See the +// documentation of SimpleJSONValue for more information. +func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error { + t, err := ImpliedType(buf) + if err != nil { + return err + } + v.Value, err = Unmarshal(buf, t) + return err +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type.go b/vendor/github.com/zclconf/go-cty/cty/json/type.go new file mode 100644 index 000000000..9131c6c77 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/type.go @@ -0,0 +1,23 @@ +package json + +import ( + "github.com/zclconf/go-cty/cty" +) + +// MarshalType returns a JSON serialization of the given type. +// +// This is just a thin wrapper around t.MarshalJSON, for symmetry with +// UnmarshalType. +func MarshalType(t cty.Type) ([]byte, error) { + return t.MarshalJSON() +} + +// UnmarshalType decodes a JSON serialization of the given type as produced +// by either Type.MarshalJSON or MarshalType. +// +// This is a convenience wrapper around Type.UnmarshalJSON. +func UnmarshalType(buf []byte) (cty.Type, error) { + var t cty.Type + err := t.UnmarshalJSON(buf) + return t, err +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go new file mode 100644 index 000000000..0fa13f6c5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go @@ -0,0 +1,170 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// JSON-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary JSON without explicit cty Type +// information. 
+// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. + + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. 
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go new file mode 100644 index 000000000..38106455f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } + + el, err := unmarshal(rawVal, 
ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := path // some errors report from the 
object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + return dec 
+} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/value.go b/vendor/github.com/zclconf/go-cty/cty/json/value.go new file mode 100644 index 000000000..f2f7dd56c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/value.go @@ -0,0 +1,65 @@ +package json + +import ( + "bytes" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Marshal produces a JSON representation of the given value that can later +// be decoded into a value of the given type. +// +// A type is specified separately to allow for the given type to include +// cty.DynamicPseudoType to represent situations where any type is permitted +// and so type information must be included to allow recovery of the stored +// structure when decoding. +// +// The given type will also be used to attempt automatic conversions of any +// non-conformant types in the given value, although this will not always +// be possible. If the value cannot be made to be conformant then an error is +// returned, which may be a cty.PathError. +// +// Capsule-typed values can be marshalled, but with some caveats. Since +// capsule values are compared by pointer equality, it is impossible to recover +// a value that will compare equal to the original value. Additionally, +// it's not possible to JSON-serialize the capsule type itself, so it's not +// valid to use capsule types within parts of the value that are conformed to +// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as +// the encapsulated type itself is serializable with the Marshal function +// in encoding/json. +func Marshal(val cty.Value, t cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(t) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, t) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + buf := &bytes.Buffer{} + var path cty.Path + err := marshal(val, t, path, buf) + + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Unmarshal decodes a JSON representation of the given value into a cty Value +// conforming to the given type. +// +// While decoding, type conversions will be done where possible to make +// the result conformant even if the types given in JSON are not exactly +// correct. If conversion isn't possible then an error is returned, which +// may be a cty.PathError. +func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) { + var path cty.Path + return unmarshal(buf, t, path) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go new file mode 100644 index 000000000..2ef02a12f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a map type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver. 
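A brief usage sketch, not part of the vendored file, of the list-type helpers defined in this file, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.List(cty.Number)

	fmt.Println(ty.IsListType())                 // true
	fmt.Println(ty.Equals(cty.List(cty.Number))) // true: equality follows the element type
	fmt.Println(ty.Equals(cty.List(cty.String))) // false

	// ListElementType returns nil for non-list types, so it doubles as a check.
	if et := ty.ListElementType(); et != nil {
		fmt.Println(et.FriendlyName()) // number
	}
}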
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "list of " + elemName
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.ListElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go
new file mode 100644
index 000000000..82d36c628
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "map of " + elemName
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.MapElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go
new file mode 100644
index 000000000..1eb99f28a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/doc.go
@@ -0,0 +1,14 @@
+// Package msgpack provides functions for serializing cty values in the
+// msgpack encoding, and decoding them again.
+//
+// If the same type information is provided both at encoding and decoding time
+// then values can be round-tripped without loss, except for capsule types
+// which are not currently supported.
+//
+// If any unknown values are passed to Marshal then they will be represented
+// using a msgpack extension with type code zero, which is understood by
+// the Unmarshal function within this package but will not be understood by
+// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values
+// are used if interoperability with other msgpack implementations is
+// required.
+package msgpack
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go
new file mode 100644
index 000000000..1b631d0a1
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/dynamic.go
@@ -0,0 +1,31 @@
+package msgpack
+
+import (
+	"bytes"
+
+	"github.com/vmihailenco/msgpack"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type dynamicVal struct {
+	Value cty.Value
+	Path  cty.Path
+}
+
+func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) {
+	// Rather than defining a msgpack-specific serialization of types,
+	// instead we use the existing JSON serialization.
+	typeJSON, err := dv.Value.Type().MarshalJSON()
+	if err != nil {
+		return nil, dv.Path.NewErrorf("failed to serialize type: %s", err)
+	}
+	var buf bytes.Buffer
+	enc := msgpack.NewEncoder(&buf)
+	enc.EncodeArrayLen(2)
+	enc.EncodeBytes(typeJSON)
+	err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go
new file mode 100644
index 000000000..6db0815e4
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/infinity.go
@@ -0,0 +1,8 @@
+package msgpack
+
+import (
+	"math"
+)
+
+var negativeInfinity = math.Inf(-1)
+var positiveInfinity = math.Inf(1)
diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go
new file mode 100644
index 000000000..87b096ca4
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/marshal.go
@@ -0,0 +1,207 @@
+package msgpack
+
+import (
+	"bytes"
+	"math/big"
+	"sort"
+
+	"github.com/vmihailenco/msgpack"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Marshal produces a msgpack serialization of the given value that
+// can be decoded into the given type later using Unmarshal.
+//
+// The given value must conform to the given type, or an error will
+// be returned.
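A minimal round-trip sketch, not part of the vendored file, for the msgpack Marshal and Unmarshal pair; the ctymsgpack alias and import paths are assumptions:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"count": cty.UnknownVal(cty.Number),
	})

	buf, err := ctymsgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}

	// Decoding with the same type recovers the value; the unknown attribute
	// comes back as unknown via the zero-type msgpack extension.
	back, err := ctymsgpack.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.GetAttr("name").AsString()) // example
	fmt.Println(back.GetAttr("count").IsKnown()) // false
}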
+func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) 
+ for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. +func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 000000000..6f6022e4d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go new file mode 100644 index 000000000..6507bc4be --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unknown.go @@ -0,0 +1,16 @@ +package msgpack + +type unknownType struct{} + +var unknownVal = unknownType{} + +// unknownValBytes is the raw bytes of the msgpack fixext1 value we +// write to represent an unknown value. It's an extension value of +// type zero whose value is irrelevant. Since it's irrelevant, we +// set it to a single byte whose value is also zero, since that's +// the most compact possible representation. 
+var unknownValBytes = []byte{0xd4, 0, 0} + +func (uv unknownType) MarshalMsgpack() ([]byte, error) { + return unknownValBytes, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 000000000..51bb76a8a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" + "github.com/zclconf/go-cty/cty" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. +// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+ peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + } + + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberUIntVal(rv), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberFloatVal(rv), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + v, err := cty.ParseNumberVal(rv) + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return v, nil + } + case cty.String: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("string is required") + } + return cty.StringVal(rv), nil + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a list is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.List(ety)), nil + case length == 0: + return cty.ListValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a set is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Set(ety)), nil + case length == 0: + return cty.SetValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a map is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Map(ety)), nil + case length == 0: + return cty.MapValEmpty(ety), nil + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + path[:len(path)-1].NewErrorf("non-string key in map") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + + val, err := unmarshal(dec, 
ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Tuple(etys)), nil + case length == 0: + return cty.TupleVal(nil), nil + case length != len(etys): + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Object(atys)), nil + case length == 0: + return cty.ObjectVal(nil), nil + case length != len(atys): + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", + len(atys), length) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + switch { + case length == -1: + return cty.NullVal(cty.DynamicPseudoType), nil + case length != 2: + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go new file mode 100644 index 000000000..d58d0287b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the precence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker. 
+func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go new file mode 100644 index 000000000..187d38751 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. +func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the reciever isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm). 
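A short sketch, not part of the vendored files, of the object-type accessors and NullVal defined here, assuming the upstream import path:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"port": cty.Number,
	})

	fmt.Println(ty.IsObjectType())                           // true
	fmt.Println(ty.HasAttribute("port"))                     // true
	fmt.Println(ty.AttributeType("port").Equals(cty.Number)) // true

	// A null of any type can be constructed, but most operations on it
	// panic, so guard with IsNull before using the value.
	n := cty.NullVal(ty)
	fmt.Println(n.IsNull()) // true
}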
+func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. +func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go new file mode 100644 index 000000000..b31444954 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/path.go @@ -0,0 +1,250 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// IndexPath is a convenience method to start a new Path with an IndexStep. +func IndexPath(v Value) Path { + return Path{}.Index(v) +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Equals compares 2 Paths for exact equality. 
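+//
+// For example, two paths built via different constructors are equal when
+// their steps match (a caller-side sketch; assumes this package is imported
+// as "cty"):
+//
+//     a := cty.GetAttrPath("items").Index(cty.NumberIntVal(0))
+//     b := cty.Path{}.GetAttr("items").Index(cty.NumberIntVal(0))
+//     same := a.Equals(b) // true
+//     _ = same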
+func (p Path) Equals(other Path) bool { + if len(p) != len(other) { + return false + } + + for i := range p { + pv := p[i] + switch pv := pv.(type) { + case GetAttrStep: + ov, ok := other[i].(GetAttrStep) + if !ok || pv != ov { + return false + } + case IndexStep: + ov, ok := other[i].(IndexStep) + if !ok { + return false + } + + if !pv.Key.RawEquals(ov.Key) { + return false + } + default: + // Any invalid steps default to evaluating false. + return false + } + } + + return true + +} + +// HasPrefix determines if the path p contains the provided prefix. +func (p Path) HasPrefix(prefix Path) bool { + if len(prefix) > len(p) { + return false + } + + return p[:len(prefix)].Equals(prefix) +} + +// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. +func GetAttrPath(name string) Path { + return Path{}.GetAttr(name) +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. +// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. 
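+//
+// For example, an IndexStep with a number key selects an element from a list
+// value (a caller-side sketch; assumes this package is imported as "cty" and
+// uses the ordinary ListVal/StringVal constructors):
+//
+//     list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//     step := cty.IndexStep{Key: cty.NumberIntVal(1)}
+//     elem, err := step.Apply(list) // cty.StringVal("b"), nil
+//     _, _ = elem, err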
+func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + + switch s.Key.Type() { + case Number: + if !(val.Type().IsListType() || val.Type().IsTupleType()) { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/path_set.go b/vendor/github.com/zclconf/go-cty/cty/path_set.go new file mode 100644 index 000000000..f1c892b9d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/zclconf/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. +func (s PathSet) Add(path Path) { + s.set.Add(path) +} + +// AddAllSteps is like Add but it also adds all of the steps leading to +// the given path. +// +// For example, if given a path representing "foo.bar", it will add both +// "foo" and "bar". +func (s PathSet) AddAllSteps(path Path) { + for i := 1; i <= len(path); i++ { + s.Add(path[:i]) + } +} + +// Has returns true if the given path is in the receiving set. +func (s PathSet) Has(path Path) bool { + return s.set.Has(path) +} + +// List makes and returns a slice of all of the paths in the receiving set, +// in an undefined but consistent order. 
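+//
+// For example, a PathSet can record which parts of a value some analysis has
+// visited (a caller-side sketch; assumes this package is imported as "cty"):
+//
+//     ps := cty.NewPathSet(
+//         cty.GetAttrPath("name"),
+//         cty.GetAttrPath("tags").Index(cty.StringVal("env")),
+//     )
+//     ps.Add(cty.GetAttrPath("port"))
+//     if ps.Has(cty.GetAttrPath("name")) {
+//         for _, p := range ps.List() {
+//             _ = p // each stored cty.Path, in an undefined but consistent order
+//         }
+//     }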
+func (s PathSet) List() []Path { + if s.Empty() { + return nil + } + ret := make([]Path, 0, s.set.Length()) + for it := s.set.Iterator(); it.Next(); { + ret = append(ret, it.Value().(Path)) + } + return ret +} + +// Remove modifies the receving set to no longer include the given path. +// If the given path was already absent, this is a no-op. +func (s PathSet) Remove(path Path) { + s.set.Remove(path) +} + +// Empty returns true if the length of the receiving set is zero. +func (s PathSet) Empty() bool { + return s.set.Length() == 0 +} + +// Union returns a new set whose contents are the union of the receiver and +// the given other set. +func (s PathSet) Union(other PathSet) PathSet { + return PathSet{ + set: s.set.Union(other.set), + } +} + +// Intersection returns a new set whose contents are the intersection of the +// receiver and the given other set. +func (s PathSet) Intersection(other PathSet) PathSet { + return PathSet{ + set: s.set.Intersection(other.set), + } +} + +// Subtract returns a new set whose contents are those from the receiver with +// any elements of the other given set subtracted. +func (s PathSet) Subtract(other PathSet) PathSet { + return PathSet{ + set: s.set.Subtract(other.set), + } +} + +// SymmetricDifference returns a new set whose contents are the symmetric +// difference of the receiver and the given other set. +func (s PathSet) SymmetricDifference(other PathSet) PathSet { + return PathSet{ + set: s.set.SymmetricDifference(other.set), + } +} + +// Equal returns true if and only if both the receiver and the given other +// set contain exactly the same paths. +func (s PathSet) Equal(other PathSet) bool { + if s.set.Length() != other.set.Length() { + return false + } + // Now we know the lengths are the same we only need to test in one + // direction whether everything in one is in the other. + for it := s.set.Iterator(); it.Next(); { + if !other.set.Has(it.Value()) { + return false + } + } + return true +} + +var crc64Table = crc64.MakeTable(crc64.ISO) + +var indexStepPlaceholder = []byte("#") + +// pathSetRules is an implementation of set.Rules from the set package, +// used internally within PathSet. +type pathSetRules struct { +} + +func (r pathSetRules) Hash(v interface{}) int { + path := v.(Path) + hash := crc64.New(crc64Table) + + for _, rawStep := range path { + switch step := rawStep.(type) { + case GetAttrStep: + // (this creates some garbage converting the string name to a + // []byte, but that's okay since cty is not designed to be + // used in tight loops under memory pressure.) + hash.Write([]byte(step.Name)) + default: + // For any other step type we just append a predefined value, + // which means that e.g. all indexes into a given collection will + // hash to the same value but we assume that collections are + // small and thus this won't hurt too much. + hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. 
+ return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go new file mode 100644 index 000000000..7b3d1196c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. +type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. 
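+//
+// For example, the predefined values can be compared against computed values
+// without constructing new ones (a caller-side sketch; assumes this package
+// is imported as "cty"):
+//
+//     n := cty.NumberIntVal(0)
+//     isZero := n.Equals(cty.Zero) // cty.True
+//     _ = isZero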
+var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go new file mode 100644 index 000000000..da2978f65 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. 
+func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 000000000..4a60494f9 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,15 @@ +package set + +type Iterator struct { + vals []interface{} + idx int +} + +func (it *Iterator) Value() interface{} { + return it.vals[it.idx] +} + +func (it *Iterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 000000000..fd1555f21 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,210 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set. If the set's rules +// implement OrderedRules then the result is ordered per those rules. If +// no order is provided, or if it is not a total order, then the iteration +// order is undefined but consistent for a particular version of cty. 
Do not +// rely on specific ordering between cty releases unless the rules order is a +// total order. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + vals := s.Values() + + return &Iterator{ + vals: vals, + idx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all the values in the set. If the set rules have +// an order then the result is in that order. If no order is provided or if +// it is not a total order then the result order is undefined, but consistent +// for a particular set value within a specific release of cty. +func (s Set) Values() []interface{} { + var ret []interface{} + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIDs := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIDs = append(bucketIDs, id) + } + sort.Ints(bucketIDs) + + for _, bucketID := range bucketIDs { + ret = append(ret, s.vals[bucketID]...) + } + + if orderRules, ok := s.rules.(OrderedRules); ok { + sort.SliceStable(ret, func(i, j int) bool { + return orderRules.Less(ret[i], ret[j]) + }) + } + + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. 
+func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 000000000..51f744b5e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,43 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} + +// OrderedRules is an extension of Rules that can apply a partial order to +// element values. When a set's Rules implements OrderedRules an iterator +// over the set will return items in the order described by the rules. +// +// If the given order is not a total order (that is, some pairs of non-equivalent +// elements do not have a defined order) then the resulting iteration order +// is undefined but consistent for a particular version of cty. The exact +// order in that case is not part of the contract and is subject to change +// between versions. +type OrderedRules interface { + Rules + + // Less returns true if and only if the first argument should sort before + // the second argument. If the second argument should sort before the first + // or if there is no defined order for the values, return false. + Less(interface{}, interface{}) bool +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go new file mode 100644 index 000000000..b4fb316f1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. 
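+//
+// For example, a caller provides a Rules implementation describing hashing
+// and equivalence for its element type and then works with the resulting set
+// (a sketch only; "intRules" is a hypothetical rules type used for
+// illustration, not part of this package):
+//
+//     type intRules struct{}
+//
+//     func (intRules) Hash(v interface{}) int           { return v.(int) }
+//     func (intRules) Equivalent(a, b interface{}) bool { return a.(int) == b.(int) }
+//
+//     s := set.NewSet(intRules{})
+//     s.Add(1)
+//     s.Add(2)
+//     s.Add(1)       // no-op: an equivalent value is already present
+//     _ = s.Length() // 2
+//     _ = s.Has(2)   // true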
+type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. +func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go new file mode 100644 index 000000000..a88ddaffb --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go @@ -0,0 +1,126 @@ +package cty + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. 
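+//
+// For example, a ValueSet can be built up incrementally, inspected with
+// Values, and then wrapped in a cty.Set value (a caller-side sketch; assumes
+// this package is imported as "cty"; SetValFromValueSet is the wrapping
+// constructor mentioned in the ValueSet documentation above):
+//
+//     vs := cty.NewValueSet(cty.String)
+//     vs.Add(cty.StringVal("a"))
+//     vs.Add(cty.StringVal("b"))
+//     vs.Add(cty.StringVal("a")) // equivalent element; the set is unchanged
+//     for _, v := range vs.Values() {
+//         _ = v // two elements, in no particular order
+//     }
+//     setVal := cty.SetValFromValueSet(vs) // a value of type cty.Set(cty.String)
+//     _ = setVal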
+func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +func (s ValueSet) requireElementType(v Value) { + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go new file mode 100644 index 000000000..3fd4fb2df --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go @@ -0,0 +1,216 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +var _ set.OrderedRules = setRules{} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
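+//
+// For example, equal values always produce equal hashes, so the hash can be
+// used to bucket candidate values before a full equality check (a caller-side
+// sketch; assumes this package is imported as "cty"):
+//
+//     a := cty.StringVal("hello")
+//     b := cty.StringVal("hello")
+//     same := a.Hash() == b.Hash() // true; unequal values may also collide
+//     _ = same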
+func (val Value) Hash() int { + hashBytes := makeSetHashBytes(val) + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +// Less is an implementation of set.OrderedRules so that we can iterate over +// set elements in a consistent order, where such an order is possible. +func (r setRules) Less(v1, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less + return false + } + + // Null values always sort after non-null values + if v2v.IsNull() && !v1v.IsNull() { + return true + } else if v1v.IsNull() { + return false + } + // Unknown values always sort after known values + if v1v.IsKnown() && !v2v.IsKnown() { + return true + } else if !v1v.IsKnown() { + return false + } + + switch r.Type { + case String: + // String values sort lexicographically + return v1v.AsString() < v2v.AsString() + case Bool: + // Weird to have a set of bools, but if we do then false sorts before true. + if v2v.True() || !v1v.True() { + return true + } + return false + case Number: + v1f := v1v.AsBigFloat() + v2f := v2v.AsBigFloat() + return v1f.Cmp(v2f) < 0 + default: + // No other types have a well-defined ordering, so we just produce a + // default consistent-but-undefined ordering then. This situation is + // not considered a compatibility constraint; callers should rely only + // on the ordering rules for primitive values. + v1h := makeSetHashBytes(v1v) + v2h := makeSetHashBytes(v2v) + return bytes.Compare(v1h, v2h) < 0 + } +} + +func makeSetHashBytes(val Value) []byte { + var buf bytes.Buffer + appendSetHashBytes(val, &buf) + return buf.Bytes() +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. 
+ if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go new file mode 100644 index 000000000..cbc3706f2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go @@ -0,0 +1,72 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. +func (t typeSet) Equals(other Type) bool { + ot, isSet := other.typeImpl.(typeSet) + if !isSet { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "set of " + elemName +} + +func (t typeSet) ElementType() Type { + return t.ElementTypeT +} + +func (t typeSet) GoString() string { + return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT) +} + +// IsSetType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsSetType() bool { + _, ok := t.typeImpl.(typeSet) + return ok +} + +// SetElementType is a convenience method that checks if the given type is +// a set type, returning a pointer to its element type if so and nil +// otherwise. 
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.SetElementType(); et != nil { +// // Do something with *et +// } +func (t Type) SetElementType() *Type { + if lt, ok := t.typeImpl.(typeSet); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go new file mode 100644 index 000000000..798cacd63 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go @@ -0,0 +1,121 @@ +package cty + +import ( + "fmt" +) + +type typeTuple struct { + typeImplSigil + ElemTypes []Type +} + +// Tuple creates a tuple type with the given element types. +// +// After a slice is passed to this function the caller must no longer access +// the underlying array, since ownership is transferred to this library. +func Tuple(elemTypes []Type) Type { + return Type{ + typeTuple{ + ElemTypes: elemTypes, + }, + } +} + +func (t typeTuple) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeTuple); ok { + if len(t.ElemTypes) != len(ot.ElemTypes) { + // Fast path: if we don't have the same number of elements + // then we can't possibly be equal. + return false + } + + for i, ty := range t.ElemTypes { + oty := ot.ElemTypes[i] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write a tuple type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // a tuple type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "tuple" +} + +func (t typeTuple) GoString() string { + if len(t.ElemTypes) == 0 { + return "cty.EmptyTuple" + } + return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes) +} + +// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about +// the empty tuple type. +var EmptyTuple Type + +// EmptyTupleVal is the only possible non-null, non-unknown value of type +// EmptyTuple. +var EmptyTupleVal Value + +func init() { + EmptyTuple = Tuple([]Type{}) + EmptyTupleVal = Value{ + ty: EmptyTuple, + v: []interface{}{}, + } +} + +// IsTupleType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the reciever isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the recieving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm). 
+// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go new file mode 100644 index 000000000..730cb9862 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. +func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. 
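+//
+// For example (a caller-side sketch; assumes this package is imported as
+// "cty"):
+//
+//     ty := cty.Object(map[string]cty.Type{
+//         "name": cty.String,
+//         "meta": cty.DynamicPseudoType,
+//     })
+//     _ = ty.HasDynamicTypes()         // true, via the nested "meta" attribute
+//     _ = cty.String.HasDynamicTypes() // false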
+func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go new file mode 100644 index 000000000..476eeea87 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go @@ -0,0 +1,139 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// PseudoTypeDynamic can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of PseudoTypeDynamic is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! + return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. 
+ + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go new file mode 100644 index 000000000..e1e220aab --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/zclconf/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. 
That is the +// responsibility of the calling application. +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go new file mode 100644 index 000000000..e54179eb1 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go @@ -0,0 +1,84 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambigiously distinct from any other possible value. +type unknownType struct { +} + +// Unknown is a special value that can be +var unknown interface{} = &unknownType{} + +// UnknownVal returns an Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// an calling application to allow partial evaluation. +// +// Unknown values of any type can be created of any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. "Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. +var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information. 
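+//
+// For example, unknowns allow partial evaluation: a value can carry either a
+// known type with an unknown value, or no type information at all (a
+// caller-side sketch; assumes this package is imported as "cty"):
+//
+//     port := cty.UnknownVal(cty.Number) // type known, value not yet known
+//     _ = port.IsKnown()                 // false
+//
+//     anything := cty.DynamicVal // neither type nor value known yet
+//     _ = anything.Type() == cty.DynamicPseudoType // true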
+var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go new file mode 100644 index 000000000..ba926475c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return NullVal(ty) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go new file mode 100644 index 000000000..80cb8f76f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -0,0 +1,98 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. Operation methods are guaranteed to support all of the +// expected short-circuit behavior for unknown and dynamic values, while +// integration methods may not. +// +// The philosophy for the operations API is that it's the caller's +// responsibility to ensure that the given types and values satisfy the +// specified invariants during a separate type check, so that the caller is +// able to return errors to its user from the application's own perspective. +// +// Consequently the design of these methods assumes such checks have already +// been done and panics if any invariants turn out not to be satisfied. These +// panic errors are not intended to be handled, but rather indicate a bug in +// the calling application that should be fixed with more checks prior to +// executing operations. 
+// +// A related consequence of this philosophy is that no automatic type +// conversions are done. If a method specifies that its argument must be +// number then it's the caller's responsibility to do that conversion before +// the call, thus allowing the application to have more constrained conversion +// rules than are offered by the built-in converter where necessary. +type Value struct { + ty Type + v interface{} +} + +// Type returns the type of the value. +func (val Value) Type() Type { + return val.ty +} + +// IsKnown returns true if the value is known. That is, if it is not +// the result of the unknown value constructor Unknown(...), and is not +// the result of an operation on another unknown value. +// +// Unknown values are only produced either directly or as a result of +// operating on other unknown values, and so an application that never +// introduces Unknown values can be guaranteed to never receive any either. +func (val Value) IsKnown() bool { + return val.v != unknown +} + +// IsNull returns true if the value is null. Values of any type can be +// null, but any operations on a null value will panic. No operation ever +// produces null, so an application that never introduces Null values can +// be guaranteed to never receive any either. +func (val Value) IsNull() bool { + return val.v == nil +} + +// NilVal is an invalid Value that can be used as a placeholder when returning +// with an error from a function that returns (Value, error). +// +// NilVal is *not* a valid error and so no operations may be performed on it. +// Any attempt to use it will result in a panic. +// +// This should not be confused with the idea of a Null value, as returned by +// NullVal. NilVal is a nil within the *Go* type system, and is invalid in +// the cty type system. Null values *do* exist in the cty type system. +var NilVal = Value{ + ty: Type{typeImpl: nil}, + v: nil, +} + +// IsWhollyKnown is an extension of IsKnown that also recursively checks +// inside collections and structures to see if there are any nested unknown +// values. +func (val Value) IsWhollyKnown() bool { + if !val.IsKnown() { + return false + } + + if val.IsNull() { + // Can't recurse into a null, so we're done + return true + } + + switch { + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsWhollyKnown() { + return false + } + } + return true + default: + return true + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go new file mode 100644 index 000000000..3deeba3bd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go @@ -0,0 +1,314 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "golang.org/x/text/unicode/norm" + + "github.com/zclconf/go-cty/cty/set" +) + +// BoolVal returns a Value of type Number whose internal value is the given +// bool. +func BoolVal(v bool) Value { + return Value{ + ty: Bool, + v: v, + } +} + +// NumberVal returns a Value of type Number whose internal value is the given +// big.Float. The returned value becomes the owner of the big.Float object, +// and so it's forbidden for the caller to mutate the object after it's +// wrapped in this way. +func NumberVal(v *big.Float) Value { + return Value{ + ty: Number, + v: v, + } +} + +// ParseNumberVal returns a Value of type number produced by parsing the given +// string as a decimal real number. 
To ensure that two identical strings will +// always produce an equal number, always use this function to derive a number +// from a string; it will ensure that the precision and rounding mode for the +// internal big decimal is configured in a consistent way. +// +// If the given string cannot be parsed as a number, the returned error has +// the message "a number is required", making it suitable to return to an +// end-user to signal a type conversion error. +// +// If the given string contains a number that becomes a recurring fraction +// when expressed in binary then it will be truncated to have a 512-bit +// mantissa. Note that this is a higher precision than that of a float64, +// so coverting the same decimal number first to float64 and then calling +// NumberFloatVal will not produce an equal result; the conversion first +// to float64 will round the mantissa to fewer than 512 bits. +func ParseNumberVal(s string) (Value, error) { + // Base 10, precision 512, and rounding to nearest even is the standard + // way to handle numbers arriving as strings. + f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven) + if err != nil { + return NilVal, fmt.Errorf("a number is required") + } + return NumberVal(f), nil +} + +// MustParseNumberVal is like ParseNumberVal but it will panic in case of any +// error. It can be used during initialization or any other situation where +// the given string is a constant or otherwise known to be correct by the +// caller. +func MustParseNumberVal(s string) Value { + ret, err := ParseNumberVal(s) + if err != nil { + panic(err) + } + return ret +} + +// NumberIntVal returns a Value of type Number whose internal value is equal +// to the given integer. +func NumberIntVal(v int64) Value { + return NumberVal(new(big.Float).SetInt64(v)) +} + +// NumberUIntVal returns a Value of type Number whose internal value is equal +// to the given unsigned integer. +func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. 
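The precision and normalization rules described in the comments above are easier to see side by side. A small sketch, assuming the canonical import path; the literals are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Numbers parsed from strings keep a 512-bit mantissa, so they are not
	// generally equal to the same decimal routed through float64 first.
	a := cty.MustParseNumberVal("0.1")
	b := cty.NumberFloatVal(0.1)
	fmt.Println(a.Equals(b).True()) // false: b was rounded to float64 precision

	// Strings are NFC-normalized on entry, so two different byte sequences
	// for the same text compare as equal.
	s1 := cty.StringVal("\u00e9")  // "é" precomposed
	s2 := cty.StringVal("e\u0301") // "e" plus combining acute accent
	fmt.Println(s1.Equals(s2).True()) // true

	// Object values carry their attribute types in the value's type.
	obj := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"port": cty.NumberIntVal(8080),
	})
	fmt.Println(obj.Type().IsObjectType()) // true
}
```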
+func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) +func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) 
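A brief sketch contrasting the homogeneous collection constructors with tuples, again assuming the canonical import path; the element values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// List elements must all have the same type, which becomes the list's
	// element type.
	names := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
	fmt.Println(names.Type().ElementType().Equals(cty.String)) // true

	// An empty collection needs its element type stated explicitly, since
	// there are no elements to infer it from.
	empty := cty.ListValEmpty(cty.Number)
	fmt.Println(empty.LengthInt()) // 0

	// A tuple records a distinct type per element, so the elements may differ.
	pair := cty.TupleVal([]cty.Value{cty.StringVal("x"), cty.NumberIntVal(1)})
	fmt.Println(pair.Type().IsTupleType()) // true

	// cty.ListVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})
	// would panic with "inconsistent list element types".
}
```

The panics for mixed element types and for empty slices are deliberate: the element type is inferred from the values, so there must be at least one value and all of them must agree.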
+func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + } +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. +func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go new file mode 100644 index 000000000..afd621cf4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -0,0 +1,1138 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty/set" +) + +// GoString is an implementation of fmt.GoStringer that produces concise +// source-like representations of values suitable for use in debug messages. +func (val Value) GoString() string { + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } + return "cty.False" + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
+ if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1)) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.SetVal(%#v)", vals) + case val.ty.IsListType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.ListVal(%#v)", vals) + case val.ty.IsMapType(): + vals := val.AsValueMap() + if len(vals) == 0 { + return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.MapVal(%#v)", vals) + case val.ty.IsTupleType(): + if val.ty.Equals(EmptyTuple) { + return "cty.EmptyTupleVal" + } + vals := val.AsValueSlice() + return fmt.Sprintf("cty.TupleVal(%#v)", vals) + case val.ty.IsObjectType(): + if val.ty.Equals(EmptyObject) { + return "cty.EmptyObjectVal" + } + vals := val.AsValueMap() + return fmt.Sprintf("cty.ObjectVal(%#v)", vals) + case val.ty.IsCapsuleType(): + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// As a special case, two null values are always equal regardless of type. +// +// The usual short-circuit rules apply, so the result will be unknown if +// either of the given values are. +// +// Use RawEquals to compare if two values are equal *ignoring* the +// short-circuit rules and the exception for null values. +func (val Value) Equals(other Value) Value { + // Start by handling Unknown values before considering types. + // This needs to be done since Null values are always equal regardless of + // type. + switch { + case !val.IsKnown() && !other.IsKnown(): + // both unknown + return UnknownVal(Bool) + case val.IsKnown() && !other.IsKnown(): + switch { + case val.IsNull(), other.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. + return UnknownVal(Bool) + case !val.ty.Equals(other.ty): + // There is no null comparison or dynamic types, so unequal types + // will never be equal. + return False + default: + return UnknownVal(Bool) + } + case other.IsKnown() && !val.IsKnown(): + switch { + case other.IsNull(), val.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. + return UnknownVal(Bool) + case !other.ty.Equals(val.ty): + // There's no null comparison or dynamic types, so unequal types + // will never be equal. 
+ return False + default: + return UnknownVal(Bool) + } + } + + switch { + case val.IsNull() && other.IsNull(): + // Nulls are always equal, regardless of type + return BoolVal(true) + case val.IsNull() || other.IsNull(): + // If only one is null then the result must be false + return BoolVal(false) + } + + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indicrectly) to + // ever be equal, even if they are otherwise identical. + + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. 
+ s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return BoolVal(val.v == other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. 
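A sketch of how `Equals` and `RawEquals` differ around unknowns and nulls, assuming the canonical import path.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	a := cty.UnknownVal(cty.Number)
	b := cty.UnknownVal(cty.Number)

	// Equals follows the short-circuit rules: two unknowns compare to an
	// unknown bool, not to true or false.
	fmt.Println(a.Equals(b).IsKnown()) // false

	// RawEquals ignores those rules and just compares type and raw value,
	// which is mainly useful when writing tests.
	fmt.Println(a.RawEquals(b)) // true

	// Two nulls are equal under Equals regardless of their types.
	fmt.Println(cty.NullVal(cty.String).Equals(cty.NullVal(cty.Bool)).True()) // true
}
```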
+func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. 
Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. 
+ if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. +func (val Value) GetAttr(name string) Value { + if val.ty == DynamicPseudoType { + return DynamicVal + } + + if !val.ty.IsObjectType() { + panic("value is not an object") + } + + name = NormalizeString(name) + if !val.ty.HasAttribute(name) { + panic("value has no attribute of that name") + } + + attrType := val.ty.AttributeType(name) + + if !val.IsKnown() { + return UnknownVal(attrType) + } + + return Value{ + ty: attrType, + v: val.v.(map[string]interface{})[name], + } +} + +// Index returns the value of an element of the receiver, which must have +// either a list, map or tuple type. This method will panic if the receiver +// type is not compatible. +// +// The key value must be the correct type for the receving collection: a +// number if the collection is a list or tuple, or a string if it is a map. +// In the case of a list or tuple, the given number must be convertable to int +// or this method will panic. The key may alternatively be of +// DynamicPseudoType, in which case the result itself is an unknown of the +// collection's element type. +// +// The result is of the receiver collection's element type, or in the case +// of a tuple the type of the specific element index requested. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be the DynamicValue. 
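A sketch exercising the arithmetic and navigation methods from this file, assuming the canonical import path; the object shape is an arbitrary example. `cty.Zero` is the package's zero-number value referenced by `Modulo` above.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Arithmetic stays within the type system: Values in, Values out.
	sum := cty.NumberIntVal(7).Add(cty.NumberIntVal(5))
	fmt.Println(sum.AsBigFloat().String()) // 12

	// Dividing a nonzero number by zero does not panic; it yields an
	// infinity of the appropriate sign, so callers that don't want
	// infinities must check the divisor themselves.
	inf := cty.NumberIntVal(1).Divide(cty.Zero)
	fmt.Println(inf.AsBigFloat().IsInf()) // true

	// GetAttr and Index navigate into structural and collection values.
	obj := cty.ObjectVal(map[string]cty.Value{
		"tags": cty.ListVal([]cty.Value{cty.StringVal("dev"), cty.StringVal("web")}),
	})
	first := obj.GetAttr("tags").Index(cty.NumberIntVal(0))
	fmt.Println(first.AsString()) // dev
}
```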
+func (val Value) Index(key Value) Value { + if val.ty == DynamicPseudoType { + return DynamicVal + } + + switch { + case val.Type().IsListType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != Number { + panic("element key for list must be number") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + return Value{ + ty: elty, + v: val.v.([]interface{})[index], + } + case val.Type().IsMapType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != String { + panic("element key for map must be string") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + keyStr := key.v.(string) + + return Value{ + ty: elty, + v: val.v.(map[string]interface{})[keyStr], + } + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return DynamicVal + } + + if key.Type() != Number { + panic("element key for tuple must be number") + } + if !key.IsKnown() { + return DynamicVal + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + eltys := val.Type().TupleElementTypes() + + if !val.IsKnown() { + return UnknownVal(eltys[index]) + } + + return Value{ + ty: eltys[index], + v: val.v.([]interface{})[index], + } + default: + panic("not a list, map, or tuple type") + } +} + +// HasIndex returns True if the receiver (which must be supported for Index) +// has an element with the given index key, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the collection or the +// key value are unknown. +// +// This method will panic if the receiver is not indexable, but does not +// impose any panic-causing type constraints on the key. 
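`Index` panics for out-of-range list and tuple keys and yields a null result for missing map keys, so `HasIndex` is the natural guard. The helper below, `elementOrDefault`, is hypothetical and not part of the vendored package; it assumes the canonical import path and a list or map receiver.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// elementOrDefault is a hypothetical helper: it probes with HasIndex before
// calling Index, so missing keys fall back to a default instead of producing
// a null (maps) or a panic (lists). It expects a list or map, since it asks
// the type for a single element type.
func elementOrDefault(coll, key, fallback cty.Value) cty.Value {
	has := coll.HasIndex(key)
	if !has.IsKnown() {
		// The collection or the key is unknown; only the element type is known.
		return cty.UnknownVal(coll.Type().ElementType())
	}
	if has.False() {
		return fallback
	}
	return coll.Index(key)
}

func main() {
	m := cty.MapVal(map[string]cty.Value{"region": cty.StringVal("westus")})
	fmt.Println(elementOrDefault(m, cty.StringVal("region"), cty.StringVal("n/a")).AsString()) // westus
	fmt.Println(elementOrDefault(m, cty.StringVal("zone"), cty.StringVal("n/a")).AsString())   // n/a
}
```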
+func (val Value) HasIndex(key Value) Value { + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except it returns an int. It has the same behavior +// as Length except that it will panic if the receiver is unknown. +// +// This is an integration method provided for the convenience of code bridging +// into Go's type system. +func (val Value) LengthInt() int { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return val.Type().Length() + } + if val.Type().IsObjectType() { + // For objects, the length is the number of attributes associated with the type. 
+ return len(val.Type().AttributeTypes()) + } + if !val.IsKnown() { + panic("value is not known") + } + if val.IsNull() { + panic("value is null") + } + + switch { + + case val.ty.IsListType(): + return len(val.v.([]interface{})) + + case val.ty.IsSetType(): + return val.v.(set.Set).Length() + + case val.ty.IsMapType(): + return len(val.v.(map[string]interface{})) + + default: + panic("value is not a collection") + } +} + +// ElementIterator returns an ElementIterator for iterating the elements +// of the receiver, which must be a collection type, a tuple type, or an object +// type. If called on a method of any other type, this method will panic. +// +// The value must be Known and non-Null, or this method will panic. +// +// If the receiver is of a list type, the returned keys will be of type Number +// and the values will be of the list's element type. +// +// If the receiver is of a map type, the returned keys will be of type String +// and the value will be of the map's element type. Elements are passed in +// ascending lexicographical order by key. +// +// If the receiver is of a set type, each element is returned as both the +// key and the value, since set members are their own identity. +// +// If the receiver is of a tuple type, the returned keys will be of type Number +// and the value will be of the corresponding element's type. +// +// If the receiver is of an object type, the returned keys will be of type +// String and the value will be of the corresponding attributes's type. +// +// ElementIterator is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ElementIterator() ElementIterator { + if !val.IsKnown() { + panic("can't use ElementIterator on unknown value") + } + if val.IsNull() { + panic("can't use ElementIterator on null value") + } + return elementIterator(val) +} + +// CanIterateElements returns true if the receiver can support the +// ElementIterator method (and by extension, ForEachElement) without panic. +func (val Value) CanIterateElements() bool { + return canElementIterator(val) +} + +// ForEachElement executes a given callback function for each element of +// the receiver, which must be a collection type or tuple type, or this method +// will panic. +// +// ForEachElement uses ElementIterator internally, and so the values passed +// to the callback are as described for ElementIterator. +// +// Returns true if the iteration exited early due to the callback function +// returning true, or false if the loop ran to completion. +// +// ForEachElement is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ForEachElement(cb ElementCallback) bool { + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + stop := cb(key, val) + if stop { + return true + } + } + return false +} + +// Not returns the logical inverse of the receiver, which must be of type +// Bool or this method will panic. +func (val Value) Not() Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. 
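A sketch of element iteration and the Bool operations, assuming the canonical import path; the map contents are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"a": cty.NumberIntVal(1),
		"b": cty.NumberIntVal(2),
	})

	// ForEachElement passes each key/value pair to the callback in ascending
	// lexicographical key order; returning true stops the loop early.
	m.ForEachElement(func(key, val cty.Value) bool {
		fmt.Printf("%s = %s\n", key.AsString(), val.AsBigFloat().String())
		return false // keep going
	})

	// The Bool operations stay within the type system as well.
	ok := m.Length().Equals(cty.NumberIntVal(2)).And(cty.True)
	fmt.Println(ok.True()) // true
}
```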
+func (val Value) And(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. 
+// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. +func (val Value) EncapsulatedValue() interface{} { + if !val.Type().IsCapsuleType() { + panic("not a capsule-typed value") + } + + return val.v +} diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go new file mode 100644 index 000000000..a6943babe --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/walk.go @@ -0,0 +1,182 @@ +package cty + +// Walk visits all of the values in a possibly-complex structure, calling +// a given function for each value. +// +// For example, given a list of strings the callback would first be called +// with the whole list and then called once for each element of the list. +// +// The callback function may prevent recursive visits to child values by +// returning false. The callback function my halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. +// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. 
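A sketch of `Walk` that counts leaf values, assuming the canonical import path and an arbitrary example value. The callback's boolean return controls whether the walk descends into the current value's children.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("example"),
		"ports": cty.ListVal([]cty.Value{cty.NumberIntVal(80), cty.NumberIntVal(443)}),
	})

	// Count the leaves: values with no child elements of their own. The path
	// argument describes where the current value lives, but it must not be
	// retained after the callback returns.
	leaves := 0
	err := cty.Walk(val, func(p cty.Path, v cty.Value) (bool, error) {
		if !v.CanIterateElements() {
			leaves++
		}
		return true, nil // keep descending
	})
	fmt.Println(leaves, err) // 3 <nil>
}
```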
+func Walk(val Value, cb func(Path, Value) (bool, error)) error { + var path Path + return walk(path, val, cb) +} + +func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error { + deeper, err := cb(path, val) + if err != nil { + return err + } + if !deeper { + return nil + } + + if val.IsNull() || !val.IsKnown() { + // Can't recurse into null or unknown values, regardless of type + return nil + } + + ty := val.Type() + switch { + case ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + nameVal, av := it.Element() + path := append(path, GetAttrStep{ + Name: nameVal.AsString(), + }) + err := walk(path, av, cb) + if err != nil { + return err + } + } + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + err := walk(path, ev, cb) + if err != nil { + return err + } + } + } + return nil +} + +// Transform visits all of the values in a possibly-complex structure, +// calling a given function for each value which has an opportunity to +// replace that value. +// +// Unlike Walk, Transform visits child nodes first, so for a list of strings +// it would first visit the strings and then the _new_ list constructed +// from the transformed values of the list items. +// +// This is useful for creating the effect of being able to make deep mutations +// to a value even though values are immutable. However, it's the responsibility +// of the given function to preserve expected invariants, such as homogenity of +// element types in collections; this function can panic if such invariants +// are violated, just as if new values were constructed directly using the +// value constructor functions. An easy way to preserve invariants is to +// ensure that the transform function never changes the value type. +// +// The callback function my halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. +// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. 
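A sketch of `Transform` that rewrites leaf strings while preserving the overall type, assuming the canonical import path and an arbitrary example value; `strings.ToUpper` stands in for any per-value rewrite.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"tags": cty.ListVal([]cty.Value{cty.StringVal("dev"), cty.StringVal("web")}),
	})

	// Upper-case every known, non-null string and leave everything else
	// unchanged, which keeps the collection constructors' homogeneity
	// invariants (and hence the value's type) intact.
	upper, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
		if v.Type().Equals(cty.String) && v.IsKnown() && !v.IsNull() {
			return cty.StringVal(strings.ToUpper(v.AsString())), nil
		}
		return v, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(upper.GetAttr("name").AsString()) // EXAMPLE
}
```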
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore new file mode 100644 index 000000000..74a6db472 --- /dev/null +++ b/vendor/go.opencensus.io/.gitignore @@ -0,0 +1,9 @@ +/.idea/ + +# go.opencensus.io/exporter/aws +/exporter/aws/ + +# Exclude vendor, use dep ensure after checkout: +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/.travis.yml b/vendor/go.opencensus.io/.travis.yml new file mode 100644 index 000000000..bd6b66ee8 --- /dev/null +++ b/vendor/go.opencensus.io/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go_import_path: go.opencensus.io + +go: + - 1.11.x + +env: + global: + GO111MODULE=on + +before_script: + - make install-tools + +script: + - make travis-ci + - go run internal/check/version.go # TODO move this to makefile diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md new file mode 100644 index 000000000..1ba3962c8 --- /dev/null +++ b/vendor/go.opencensus.io/CONTRIBUTING.md @@ -0,0 +1,63 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. 
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution;
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
+
+## Instructions
+
+Fork the repo, then check out the upstream repo to your GOPATH by running:
+
+```
+$ go get -d go.opencensus.io
+```
+
+Add your fork as an origin:
+
+```
+cd $(go env GOPATH)/src/go.opencensus.io
+git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
+```
+
+Run tests:
+
+```
+$ make install-tools # Only first time.
+$ make
+```
+
+Check out a new branch, make your modifications, and push the branch to your fork:
+
+```
+$ git checkout -b feature
+# edit files
+$ git commit
+$ git push fork feature
+```
+
+Open a pull request against the main opencensus-go repo.
+
+## General Notes
+This project uses AppVeyor and Travis for CI.
+
+Dependencies are managed with `go mod`. If you work with the sources under your
+`$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock
new file mode 100644
index 000000000..3be12ac8f
--- /dev/null
+++ b/vendor/go.opencensus.io/Gopkg.lock
@@ -0,0 +1,231 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + branch = "master" + digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" + name = "git.apache.org/thrift.git" + packages = ["lib/go/thrift"] + pruneopts = "UT" + revision = "6e67faa92827ece022380b211c2caaadd6145bf5" + source = "github.com/apache/thrift" + +[[projects]] + branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + +[[projects]] + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" + name = "github.com/openzipkin/zipkin-go" + packages = [ + ".", + "idgenerator", + "model", + "propagation", + "reporter", + "reporter/http", + ] + pruneopts = "UT" + revision = "d455a5674050831c1e187644faa4046d653433c2" + version = "v0.1.1" + +[[projects]] + digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" + +[[projects]] + branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" + +[[projects]] + branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs", + ] + pruneopts = "UT" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" + +[[projects]] + branch = "master" + digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "922f4815f713f213882e8ef45e0d315b164d705c" + +[[projects]] + branch = "master" + digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" + +[[projects]] + branch = "master" + digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" + +[[projects]] + digest = 
"1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "e21acd801f91da814261b938941d193bb036441a" + +[[projects]] + branch = "master" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" + +[[projects]] + digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" + version = "v1.14.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "git.apache.org/thrift.git/lib/go/thrift", + "github.com/golang/protobuf/proto", + "github.com/openzipkin/zipkin-go", + "github.com/openzipkin/zipkin-go/model", + "github.com/openzipkin/zipkin-go/reporter", + "github.com/openzipkin/zipkin-go/reporter/http", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "golang.org/x/net/context", + "golang.org/x/net/http2", + "google.golang.org/api/support/bundler", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/metadata", + "google.golang.org/grpc/stats", + "google.golang.org/grpc/status", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml new file mode 100644 index 000000000..a9f3cd68e --- /dev/null +++ b/vendor/go.opencensus.io/Gopkg.toml @@ -0,0 +1,36 @@ +# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" +# to avoid locking to a particular minor version which can cause dep to not be +# able to find a satisfying dependency graph. 
+ +[[constraint]] + branch = "master" + name = "git.apache.org/thrift.git" + source = "github.com/apache/thrift" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.0.0" + +[[constraint]] + name = "github.com/openzipkin/zipkin-go" + version = ">=0.1.0" + +[[constraint]] + name = "github.com/prometheus/client_golang" + version = ">=0.8.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + branch = "master" + name = "google.golang.org/api" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.11.3" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 000000000..7a4a3ea24 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile new file mode 100644 index 000000000..457866cb1 --- /dev/null +++ b/vendor/go.opencensus.io/Makefile @@ -0,0 +1,96 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOFMT=gofmt +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. +TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" +TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') + +.DEFAULT_GOAL := fmt-lint-vet-embedmd-test + +.PHONY: fmt-lint-vet-embedmd-test +fmt-lint-vet-embedmd-test: fmt lint vet embedmd test + +# TODO enable test-with-coverage in tavis +.PHONY: travis-ci +travis-ci: fmt lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: fmt +fmt: + @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ + if [ "$$FMTOUT" ]; then \ + echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ + echo "$$FMTOUT\n"; \ + exit 1; \ + else \ + echo "Fmt finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/lint/golint + go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md new file mode 100644 index 000000000..fabab2e06 --- /dev/null +++ b/vendor/go.opencensus.io/README.md @@ -0,0 +1,263 @@ +# OpenCensus Libraries for Go + +[![Build Status][travis-image]][travis-url] +[![Windows Build Status][appveyor-image]][appveyor-url] +[![GoDoc][godoc-image]][godoc-url] +[![Gitter chat][gitter-image]][gitter-url] + +OpenCensus Go is a Go implementation of OpenCensus, a toolkit for +collecting application performance and behavior monitoring data. +Currently it consists of three major components: tags, stats and tracing. + +## Installation + +``` +$ go get -u go.opencensus.io +``` + +The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). +The use of vendoring or a dependency management tool is recommended. + +## Prerequisites + +OpenCensus Go libraries require Go 1.8 or later. + +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + +## Exporters + +OpenCensus can export instrumentation data to various backends. +OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): + +* [Prometheus][exporter-prom] for stats +* [OpenZipkin][exporter-zipkin] for traces +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces +* [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. 
OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +## Tags + +Tags represent propagated key-value pairs. They are propagated using `context.Context` +in the same process or can be encoded to be transmitted on the wire. Usually, this will +be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` +for gRPC. + +Package `tag` allows adding or modifying tags in the current context. + +[embedmd]:# (internal/readme/tags.go new) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Upsert(userIDKey, "cde36753ed"), +) +if err != nil { + log.Fatal(err) +} +``` + +## Stats + +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. + +OpenCensus stats collection happens in two stages: + +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data + +### Recording + +Measurements are data points associated with a measure. +Recording implicitly tags the set of Measurements with the tags from the +provided context: + +[embedmd]:# (internal/readme/stats.go record) +```go +stats.Record(ctx, videoSize.M(102478)) +``` + +### Views + +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (measurements). + +Views have two parts: the tags to group by and the aggregation type used. + +Currently three types of aggregations are supported: +* CountAggregation is used to count the number of times a sample was recorded. +* DistributionAggregation is used to provide a histogram of the values of the samples. +* SumAggregation is used to sum up all sample values. + +[embedmd]:# (internal/readme/stats.go aggs) +```go +distAgg := view.Distribution(1<<32, 2<<32, 3<<32) +countAgg := view.Count() +sumAgg := view.Sum() +``` + +Here we create a view with the DistributionAggregation over our measure. + +[embedmd]:# (internal/readme/stats.go view) +```go +if err := view.Register(&view.View{ + Name: "example.com/video_size_distribution", + Description: "distribution of processed video size over time", + Measure: videoSize, + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), +}); err != nil { + log.Fatalf("Failed to register view: %v", err) +} +``` + +Register begins collecting data for the view. Registered views' data will be +exported via the registered exporters. + +## Traces + +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. Spans include metadata about the step, +including especially the time spent in the step, called the span’s latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has a name, latency, status and +additional metadata. + +Below we are starting a span for a cache read and ending it +when we are done: + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. 
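To make that parent/child relationship concrete, here is a short sketch (not part of the upstream README) using the `go.opencensus.io/trace` package: the second `StartSpan` call finds the span already carried by the context and creates a child of it.

```go
import (
	"context"

	"go.opencensus.io/trace"
)

func handleRequest(ctx context.Context) {
	// Root span, or a child of whatever span ctx already carries.
	ctx, parent := trace.StartSpan(ctx, "frontend.Request")
	defer parent.End()

	// ctx now carries parent, so this span becomes its child.
	_, child := trace.StartSpan(ctx, "cache.Get")
	defer child.End()
}
```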
+ +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context +doesn't contain a span. Or, it creates a child of the span that is +already in current context. The returned context can be used to keep +propagating the newly created span in the current context. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() + +// Do work to get from cache. +``` + +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of their mutual use. + +## Profiles + +OpenCensus tags can be applied as profiler labels +for users who are on Go 1.9 and above. + +[embedmd]:# (internal/readme/tags.go profiler) +```go +ctx, err = tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Insert(userIDKey, "fff0989878"), +) +if err != nil { + log.Fatal(err) +} +tag.Do(ctx, func(ctx context.Context) { + // Do work. + // When profiling is on, samples will be + // recorded with the key/values from the tag map. +}) +``` + +A screenshot of the CPU profile from the program above: + +![CPU profile](https://i.imgur.com/jBKjlkw.png) + +## Deprecation Policy + +Before version 1.0.0, the following deprecation policy will be observed: + +No backwards-incompatible changes will be made except for the removal of symbols that have +been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release +removing the *Deprecated* functionality will be made no sooner than 28 days after the first +release in which the functionality was marked *Deprecated*. 
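As a follow-up to the Exporters section above, which notes that custom exporters can be written against the exporter interfaces, the sketch below shows a toy trace exporter; the type and its name are illustrative only and not part of the library.

```go
import (
	"log"

	"go.opencensus.io/trace"
)

// printExporter satisfies trace.Exporter by logging each finished span.
type printExporter struct{}

func (printExporter) ExportSpan(sd *trace.SpanData) {
	log.Printf("span %q took %v", sd.Name, sd.EndTime.Sub(sd.StartTime))
}

func init() {
	// Every sampled span is delivered to registered exporters when it ends.
	trace.RegisterExporter(printExporter{})
}
```

A stats exporter follows the same pattern: implement `view.Exporter` and register it with `view.RegisterExporter`.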
+ +[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master +[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go +[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true +[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master +[godoc-image]: https://godoc.org/go.opencensus.io?status.svg +[godoc-url]: https://godoc.org/go.opencensus.io +[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg +[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap +[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace + +[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus +[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver +[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml new file mode 100644 index 000000000..12bd7c4c7 --- /dev/null +++ b/vendor/go.opencensus.io/appveyor.yml @@ -0,0 +1,25 @@ +version: "{build}" + +platform: x64 + +clone_folder: c:\gopath\src\go.opencensus.io + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + GO111MODULE: 'on' + CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 + +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 + - go version + - go env + +build: false +deploy: false + +test_script: + - cd %APPVEYOR_BUILD_FOLDER% + - go build -v .\... + - go test -v .\... 
# No -race because cgo is disabled diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod new file mode 100644 index 000000000..cb4de80f3 --- /dev/null +++ b/vendor/go.opencensus.io/go.mod @@ -0,0 +1,12 @@ +module go.opencensus.io + +require ( + github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.3.0 + github.com/hashicorp/golang-lru v0.5.1 + golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect + golang.org/x/text v0.3.2 // indirect + google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect + google.golang.org/grpc v1.20.1 +) diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum new file mode 100644 index 000000000..0b948c2b4 --- /dev/null +++ b/vendor/go.opencensus.io/go.sum @@ -0,0 +1,61 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go new file mode 100644 index 000000000..9a638781c --- /dev/null +++ 
b/vendor/go.opencensus.io/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opencensus.io/internal" + +import ( + "fmt" + "time" + + opencensus "go.opencensus.io" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Now().Sub(start)) +} diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go new file mode 100644 index 000000000..de8ccf236 --- /dev/null +++ b/vendor/go.opencensus.io/internal/sanitize.go @@ -0,0 +1,50 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + "unicode" +) + +const labelKeySizeLimit = 100 + +// Sanitize returns a string that is trunacated to 100 characters if it's too +// long, and replaces non-alphanumeric characters to underscores. +func Sanitize(s string) string { + if len(s) == 0 { + return s + } + if len(s) > labelKeySizeLimit { + s = s[:labelKeySizeLimit] + } + s = strings.Map(sanitizeRune, s) + if unicode.IsDigit(rune(s[0])) { + s = "key_" + s + } + if s[0] == '_' { + s = "key" + s + } + return s +} + +// converts anything that is not a letter or digit to an underscore +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + // Everything else turns into an underscore + return '_' +} diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go new file mode 100644 index 000000000..41b2c3fc0 --- /dev/null +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package tagencoding contains the tag encoding +// used interally by the stats collector. +package tagencoding // import "go.opencensus.io/internal/tagencoding" + +// Values represent the encoded buffer for the values. +type Values struct { + Buffer []byte + WriteIndex int + ReadIndex int +} + +func (vb *Values) growIfRequired(expected int) { + if len(vb.Buffer)-vb.WriteIndex < expected { + tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) + copy(tmp, vb.Buffer) + vb.Buffer = tmp + } +} + +// WriteValue is the helper method to encode Values from map[Key][]byte. +func (vb *Values) WriteValue(v []byte) { + length := len(v) & 0xff + vb.growIfRequired(1 + length) + + // writing length of v + vb.Buffer[vb.WriteIndex] = byte(length) + vb.WriteIndex++ + + if length == 0 { + // No value was encoded for this key + return + } + + // writing v + copy(vb.Buffer[vb.WriteIndex:], v[:length]) + vb.WriteIndex += length +} + +// ReadValue is the helper method to decode Values to a map[Key][]byte. +func (vb *Values) ReadValue() []byte { + // read length of v + length := int(vb.Buffer[vb.ReadIndex]) + vb.ReadIndex++ + if length == 0 { + // No value was encoded for this key + return nil + } + + // read value of v + v := make([]byte, length) + endIdx := vb.ReadIndex + length + copy(v, vb.Buffer[vb.ReadIndex:endIdx]) + vb.ReadIndex = endIdx + return v +} + +// Bytes returns a reference to already written bytes in the Buffer. +func (vb *Values) Bytes() []byte { + return vb.Buffer[:vb.WriteIndex] +} diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go new file mode 100644 index 000000000..073af7b47 --- /dev/null +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "time" +) + +// Trace allows internal access to some trace functionality. +// TODO(#412): remove this +var Trace interface{} + +// LocalSpanStoreEnabled true if the local span store is enabled. +var LocalSpanStoreEnabled bool + +// BucketConfiguration stores the number of samples to store for span buckets +// for successful and failed spans for a particular span name. +type BucketConfiguration struct { + Name string + MaxRequestsSucceeded int + MaxRequestsErrors int +} + +// PerMethodSummary is a summary of the spans stored for a single span name. 
+type PerMethodSummary struct { + Active int + LatencyBuckets []LatencyBucketSummary + ErrorBuckets []ErrorBucketSummary +} + +// LatencyBucketSummary is a summary of a latency bucket. +type LatencyBucketSummary struct { + MinLatency, MaxLatency time.Duration + Size int +} + +// ErrorBucketSummary is a summary of an error bucket. +type ErrorBucketSummary struct { + ErrorCode int32 + Size int +} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go new file mode 100644 index 000000000..52a7b3bf8 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 000000000..12695ce2d --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 000000000..aadae41e6 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,35 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. +func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 000000000..8293712c7 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. +type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 000000000..7fe057b19 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. 
+ BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 000000000..c3f8ec27b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. 
+ +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 000000000..b483a1371 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 000000000..ca1f39049 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. 
+func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producer currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 000000000..6cee9ed17 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go new file mode 100644 index 000000000..626d73645 --- /dev/null +++ b/vendor/go.opencensus.io/opencensus.go @@ -0,0 +1,21 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package opencensus contains Go support for OpenCensus. +package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. 
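A minimal sketch of how the Producer/Manager pair above is meant to be used (illustrative only; constProducer is an invented name): a metrics source registers itself with the global manager, and an exporter periodically calls GetAll and Read.

package main

import (
	"fmt"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// constProducer is a toy metrics source; any type with
// Read() []*metricdata.Metric satisfies metricproducer.Producer.
type constProducer struct{}

func (constProducer) Read() []*metricdata.Metric { return nil }

func main() {
	p := constProducer{}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)

	// An exporter would do this on a timer: snapshot the registered producers
	// (GetAll returns a fresh slice each call) and read their metrics.
	for _, producer := range metricproducer.GlobalManager().GetAll() {
		fmt.Println("metrics read:", len(producer.Read()))
	}
}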
+func Version() string { + return "0.22.0" +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 000000000..da815b2a7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. 
+ format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 000000000..17142aabe --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,143 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = wrappedBody(track, resp.Body) + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. 
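For reference, a hedged usage sketch of the client side shown above: wrapping an http.Client with ochttp.Transport is enough to get the trace and stats round-trippers that RoundTrip assembles. The target URL is a placeholder, not something the vendored code prescribes.

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// The zero-value Transport uses http.DefaultTransport and B3 propagation.
	client := &http.Client{Transport: &ochttp.Transport{}}

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}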
+func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + t.respSize += int64(n) + switch err { + case nil: + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 000000000..10e626b16 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 000000000..2f1c7f006 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// B3 headers that OpenCensus understands. +const ( + TraceIDHeader = "X-B3-TraceId" + SpanIDHeader = "X-B3-SpanId" + SampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, receiver of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +// ParseTraceID parses the value of the X-B3-TraceId header. +func ParseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. 
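A small sketch of extracting a span context from B3 headers with the format above; the header values are arbitrary hex IDs chosen for the example.

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)         // placeholder URL
	req.Header.Set(b3.TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") // 16-byte trace ID
	req.Header.Set(b3.SpanIDHeader, "0020000000000001")                  // 8-byte span ID
	req.Header.Set(b3.SampledHeader, "1")

	var f b3.HTTPFormat
	if sc, ok := f.SpanContextFromRequest(req); ok {
		fmt.Printf("trace=%s span=%s sampled=%v\n", sc.TraceID, sc.SpanID, sc.IsSampled())
	}
}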
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 000000000..5e6a34307 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 000000000..4f6404fa7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,449 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeParent, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
+ } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + int64(r.ContentLength), -1) + } + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) + }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. 
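Putting the server pieces together, a usage sketch (the port, route pattern, and handlers are arbitrary choices, not defaults prescribed by the package): ochttp.Handler wraps the mux, WithRouteTag from route.go records a low-cardinality route, and /healthz is skipped by the health-endpoint check used in startTrace.

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK) // not traced: matched by isHealthEndpoint
	})
	// Record the route tag "/users/:id" instead of the raw, high-cardinality path.
	mux.Handle("/users/", ochttp.WithRouteTag(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}), "/users/:id"))

	h := &ochttp.Handler{
		Handler:          mux,
		IsPublicEndpoint: true, // link, rather than parent, any incoming trace context
	}
	log.Fatal(http.ListenAndServe(":8080", h)) // port is an arbitrary example
}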
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + 
http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 000000000..05c6c56cc --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
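A sketch of wiring the client-trace annotator above into the Transport from client.go. AlwaysSample is used only so the example reliably produces a span; in production the default sampler would normally be kept.

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Annotate client spans with DNS, connect, TLS and write events.
			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
			StartOptions:   trace.StartOptions{Sampler: trace.AlwaysSample()},
		},
	}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}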
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 000000000..63bbcda5e --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
+var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host, _ = tag.NewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode, _ = tag.NewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path, _ = tag.NewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") +) + +// Default distributions used by views in this package. 
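As an illustration of how these measures and tag keys are meant to be combined (the view name and bucket bounds below are arbitrary examples): a custom server-latency view keyed by the low-cardinality route tag rather than the raw http.path tag.

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	latencyByRoute := &view.View{
		Name:        "example.com/http/server/latency_by_route", // arbitrary example name
		Description: "Server latency distribution by route",
		Measure:     ochttp.ServerLatency,
		TagKeys:     []tag.Key{ochttp.KeyServerRoute, ochttp.StatusCode},
		Aggregation: view.Distribution(10, 50, 100, 500, 1000, 5000), // example bounds in ms
	}
	if err := view.Register(latencyByRoute); err != nil {
		log.Fatal(err)
	}
}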
+var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 000000000..c23b97fb1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,239 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
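A registration sketch for the convenience views defined above; registering views individually is what the deprecation notes on DefaultClientViews and DefaultServerViews suggest. The particular selection here is an example, not a recommendation from the package.

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Without registration, no data is collected for these views.
	err := view.Register(
		ochttp.ClientCompletedCount,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ServerLatencyView,
		ochttp.ServerResponseBytesView,
	)
	if err != nil {
		log.Fatal(err)
	}
}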
+ +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + URLAttribute = "http.url" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. +type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. 
+ bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + } + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
+ if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 000000000..7d75cae2b --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 000000000..b1764e1d3 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
+func EncodeLabels(labels map[string]string) string {
+	sortedKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+
+	s := ""
+	for i, k := range sortedKeys {
+		if i > 0 {
+			s += ","
+		}
+		s += k + "=" + strconv.Quote(labels[k])
+	}
+	return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+	m := map[string]string{}
+	// Ensure a trailing comma, which allows us to keep the regex simpler
+	s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+	for len(s) > 0 {
+		match := labelRegex.FindStringSubmatch(s)
+		if len(match) == 0 {
+			return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+		}
+		v := match[2]
+		if v == "" {
+			v = match[3]
+		} else {
+			var err error
+			if v, err = strconv.Unquote(v); err != nil {
+				return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+			}
+		}
+		m[match[1]] = v
+
+		s = s[len(match[0]):]
+	}
+	return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+	res := &Resource{
+		Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+	}
+	labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+	if labels == "" {
+		return res, nil
+	}
+	var err error
+	if res.Labels, err = DecodeLabels(labels); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	res := &Resource{
+		Type:   a.Type,
+		Labels: map[string]string{},
+	}
+	if res.Type == "" {
+		res.Type = b.Type
+	}
+	for k, v := range b.Labels {
+		res.Labels[k] = v
+	}
+	// Labels from resource a overwrite labels from resource b.
+	for k, v := range a.Labels {
+		res.Labels[k] = v
+	}
+	return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type or label key is already set,
+// the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+	return func(ctx context.Context) (*Resource, error) {
+		return detectAll(ctx, detectors...)
+	}
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
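// NOTE (editorial, not part of the vendored diff): a small sketch of the FromEnv
// detector described above, decoding OC_RESOURCE_TYPE and OC_RESOURCE_LABELS.
// The resource type, label keys, and values are invented for the example.
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	os.Setenv("OC_RESOURCE_TYPE", "k8s.io/container")
	os.Setenv("OC_RESOURCE_LABELS", `k8s.io/namespace/name="default",k8s.io/pod/name="demo"`)

	res, err := resource.FromEnv(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Type)   // k8s.io/container
	fmt.Println(res.Labels) // map[k8s.io/namespace/name:default k8s.io/pod/name:demo]
}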
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 000000000..00d473ee0 --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,69 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. + +Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if they are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. 
+ +*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 000000000..36935e629 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 000000000..1ffd3cefc --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. 
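// NOTE (editorial, not part of the vendored diff): a sketch of defining the typed
// measures described in the package documentation above. The names, descriptions,
// and units are invented for the example.
package main

import (
	"fmt"

	"go.opencensus.io/stats"
)

var (
	latencyMs    = stats.Float64("example.com/measures/latency", "Request latency", stats.UnitMilliseconds)
	requestBytes = stats.Int64("example.com/measures/request_bytes", "Request size", stats.UnitBytes)
)

func main() {
	// Measures have no effect until a view aggregates them; defining them is cheap.
	fmt.Println(latencyMs.Name(), latencyMs.Unit())
	fmt.Println(requestBytes.Name(), requestBytes.Unit())
}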
+type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 000000000..f02c1eda8 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 000000000..d101d7973 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 000000000..ad4691184 --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "context" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +// Record records one or multiple measurements with the same context at once. 
+// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } + recorder := internal.DefaultRecorder + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return nil + } + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } + } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 000000000..6931a5f29 --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 000000000..b7f169b4a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,120 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +// AggType represents the type of aggregation function used on a View. 
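// NOTE (editorial, not part of the vendored diff): a sketch of recording
// measurements with and without tag mutations, using the Record and
// RecordWithTags helpers above. Measure and tag key names are invented.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var latencyMs = stats.Float64("example.com/measures/latency", "Request latency", stats.UnitMilliseconds)

func main() {
	ctx := context.Background()

	// Silently dropped unless a view is registered for the measure.
	stats.Record(ctx, latencyMs.M(12.3))

	// Record with an extra tag without mutating ctx for the caller.
	keyMethod, err := tag.NewKey("method")
	if err != nil {
		log.Fatal(err)
	}
	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(keyMethod, "GET")}, latencyMs.M(8.9)); err != nil {
		log.Fatal(err)
	}
}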
+type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. + Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. + + newData func() AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func() AggregationData { + return &CountData{} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func() AggregationData { + return &SumData{} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// An distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there is no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +func Distribution(bounds ...float64) *Aggregation { + return &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + newData: func() AggregationData { + return newDistributionData(bounds) + }, + } +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func() AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 000000000..d500e67f7 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,293 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" + "time" + + "go.opencensus.io/metric/metricdata" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Mosts users won't directly access aggregration data. +type AggregationData interface { + isAggregationData() bool + addSample(v float64, attachments map[string]interface{}, t time.Time) + clone() AggregationData + equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. +// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. +type CountData struct { + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Value == a2.Value +} + +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return math.Pow(a.Value-a2.Value, 2) < epsilon +} + +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. 
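// NOTE (editorial, not part of the vendored diff): a standalone sketch of the
// bucketing rule used by the Distribution aggregation described above:
// len(bounds)+1 buckets, where a value lands in the first bucket whose upper
// bound exceeds it.
package main

import "fmt"

func bucketIndex(v float64, bounds []float64) int {
	for i, b := range bounds {
		if v < b {
			return i
		}
	}
	return len(bounds) // overflow bucket: [bounds[len-1], +infinity)
}

func main() {
	bounds := []float64{5, 10, 25}
	for _, v := range []float64{1, 5, 12, 100} {
		fmt.Printf("v=%v -> bucket %d of %d\n", v, bucketIndex(v, bounds), len(bounds)+1)
	}
}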
+type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values +} + +func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + } +} + +// Sum returns the sum of all samples collected. +func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v + } + if v > a.Max { + a.Max = v + } + a.Count++ + a.addToBucket(v, attachments, t) + + if a.Count == 1 { + a.Mean = v + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) +} + +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) 
+ return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + +// LastValueData returns the last value recorded for LastValue aggregation. +type LastValueData struct { + Value float64 +} + +func (l *LastValueData) isAggregationData() bool { + return true +} + +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + l.Value = v +} + +func (l *LastValueData) clone() AggregationData { + return &LastValueData{l.Value} +} + +func (l *LastValueData) equal(other AggregationData) bool { + a2, ok := other.(*LastValueData) + if !ok { + return false + } + return l.Value == a2.Value +} + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go new file mode 100644 index 000000000..8a6a2c0fd --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -0,0 +1,86 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "sort" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/tag" +) + +type collector struct { + // signatures holds the aggregations values for each unique tag signature + // (values for all keys) to its aggregator. + signatures map[string]AggregationData + // Aggregation is the description of the aggregation to perform for this + // view. 
+ a *Aggregation +} + +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { + aggregator, ok := c.signatures[s] + if !ok { + aggregator = c.a.newData() + c.signatures[s] = aggregator + } + aggregator.addSample(v, attachments, t) +} + +// collectRows returns a snapshot of the collected Row values. +func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + vb := &tagencoding.Values{ + Buffer: make([]byte, len(keys)), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. +func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 000000000..dced225c3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. 
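// NOTE (editorial, not part of the vendored diff): a sketch of the exporter
// registration described in the view package documentation above, using a
// trivial logging exporter. Real programs would register a backend-specific
// exporter instead.
package main

import (
	"log"

	"go.opencensus.io/stats/view"
)

type logExporter struct{}

// ExportView is called periodically by the view worker with aggregated data.
func (logExporter) ExportView(vd *view.Data) {
	for _, row := range vd.Rows {
		log.Printf("%s %v: %v", vd.View.Name, row.Tags, row.Data)
	}
}

func main() {
	e := logExporter{}
	view.RegisterExporter(e)
	defer view.UnregisterExporter(e)
	// ... register views and record measurements as usual ...
}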
+package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 000000000..7cb59718f --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,58 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +import "sync" + +var ( + exportersMu sync.RWMutex // guards exporters + exporters = make(map[Exporter]struct{}) +) + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. +type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + exporters[e] = struct{}{} +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + exportersMu.Lock() + defer exportersMu.Unlock() + + delete(exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 000000000..37f88e1d9 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function to be before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. 
+ Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. + // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function tp apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. +var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + + return nil +} + +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. 
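// NOTE (editorial, not part of the vendored diff): an end-to-end sketch of
// registering a View over a measure, recording tagged data, and reading the
// aggregated rows back. All names and bucket bounds are invented.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

var latencyMs = stats.Float64("example.com/measures/latency", "Request latency", stats.UnitMilliseconds)

func main() {
	keyMethod, err := tag.NewKey("method")
	if err != nil {
		log.Fatal(err)
	}

	latencyView := &view.View{
		Name:        "example.com/views/latency",
		Description: "Distribution of request latencies",
		Measure:     latencyMs,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(5, 10, 25, 50, 100, 250),
	}
	if err := view.Register(latencyView); err != nil {
		log.Fatal(err)
	}

	ctx, err := tag.New(context.Background(), tag.Upsert(keyMethod, "GET"))
	if err != nil {
		log.Fatal(err)
	}
	stats.Record(ctx, latencyMs.M(42))

	rows, err := view.RetrieveData(latencyView.Name)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rows {
		log.Printf("%v: %v", r.Tags, r.Data)
	}
}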
+func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val, attachments, t) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. +func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 000000000..f67b5c464 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,140 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLabelKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLabelKeys(v), + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 000000000..2f3c018af --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,281 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "fmt" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + defaultWorker = newWorker() + go defaultWorker.start() + internal.DefaultRecorder = record +} + +type measureRef struct { + measure string + views map[*viewInternal]struct{} +} + +type worker struct { + measures map[string]*measureRef + views map[string]*viewInternal + startTimes map[*viewInternal]time.Time + + timer *time.Ticker + c chan command + quit, done chan bool + mu sync.RWMutex +} + +var defaultWorker *worker + +var defaultReportingDuration = 10 * time.Second + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func Find(name string) (v *View) { + req := &getViewByNameReq{ + name: name, + c: make(chan *getViewByNameResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.v +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. +func Register(views ...*View) error { + req := ®isterViewReq{ + views: views, + err: make(chan error), + } + defaultWorker.c <- req + return <-req.err +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func Unregister(views ...*View) { + names := make([]string, len(views)) + for i := range views { + names[i] = views[i].Name + } + req := &unregisterFromViewReq{ + views: names, + done: make(chan struct{}), + } + defaultWorker.c <- req + <-req.done +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func RetrieveData(viewName string) ([]*Row, error) { + req := &retrieveDataReq{ + now: time.Now(), + v: viewName, + c: make(chan *retrieveDataResp), + } + defaultWorker.c <- req + resp := <-req.c + return resp.rows, resp.err +} + +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + req := &recordReq{ + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), + } + defaultWorker.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + defaultWorker.c <- req + <-req.c // don't return until the timer is set to the new duration. 
+} + +func newWorker() *worker { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + startTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + } +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage(time.Now()) + case <-w.quit: + w.timer.Stop() + close(w.c) + w.done <- true + return + } + } +} + +func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + + w.quit <- true + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. + return x, nil + } + w.views[vi.view.Name] = vi + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + +func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() + for _, v := range w.views { + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. 
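// NOTE (editorial, not part of the vendored diff): a sketch of tuning the
// reporting interval used by the worker above; its ticker fires every
// 10 seconds by default, and exporters may document a minimum supported period.
package main

import (
	"time"

	"go.opencensus.io/stats/view"
)

func main() {
	// Push aggregated data to registered exporters every 30 seconds.
	view.SetReportingPeriod(30 * time.Second)
}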
+func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 000000000..0267e179a --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,186 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "errors" + "fmt" + "strings" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +type command interface { + handleCommand(w *worker) +} + +// getViewByNameReq is the command to get a view given its name. +type getViewByNameReq struct { + name string + c chan *getViewByNameResp +} + +type getViewByNameResp struct { + v *View +} + +func (cmd *getViewByNameReq) handleCommand(w *worker) { + v := w.views[cmd.name] + if v == nil { + cmd.c <- &getViewByNameResp{nil} + return + } + cmd.c <- &getViewByNameResp{v.view} +} + +// registerViewReq is the command to register a view. +type registerViewReq struct { + views []*View + err chan error +} + +func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } + var errstr []string + for _, view := range cmd.views { + vi, err := w.tryRegisterView(view) + if err != nil { + errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) + continue + } + internal.SubscriptionReporter(view.Measure.Name()) + vi.subscribe() + } + if len(errstr) > 0 { + cmd.err <- errors.New(strings.Join(errstr, "\n")) + } else { + cmd.err <- nil + } +} + +// unregisterFromViewReq is the command to unregister to a view. Has no +// impact on the data collection for client that are pulling data from the +// library. +type unregisterFromViewReq struct { + views []string + done chan struct{} +} + +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { + for _, name := range cmd.views { + vi, ok := w.views[name] + if !ok { + continue + } + + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + + vi.unsubscribe() + if !vi.isSubscribed() { + // this was the last subscription and view is not collecting anymore. + // The collected data can be cleared. + vi.clearRows() + } + w.unregisterView(name) + } + cmd.done <- struct{}{} +} + +// retrieveDataReq is the command to retrieve data for a view. 
+type retrieveDataReq struct { + now time.Time + v string + c chan *retrieveDataResp +} + +type retrieveDataResp struct { + rows []*Row + err error +} + +func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + vi, ok := w.views[cmd.v] + if !ok { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), + } + return + } + + if !vi.isSubscribed() { + cmd.c <- &retrieveDataResp{ + nil, + fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), + } + return + } + cmd.c <- &retrieveDataResp{ + vi.collectedRows(), + nil, + } +} + +// recordReq is the command to record data related to multiple measures +// at once. +type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 000000000..b27d1b26b --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. 
+func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 000000000..da16b74e4 --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. + +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 000000000..4e63d08c9 --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,45 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey consequently with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// MustNewKey creates or retrieves a string key identified by name. +// An invalid key name raises a panic. +func MustNewKey(name string) Key { + k, err := NewKey(name) + if err != nil { + panic(err) + } + return k +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 000000000..0272ef85a --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +type tagContent struct { + value string + m metadatas +} + +// Map is a map of tags. Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]tagContent +} + +// Value returns the value for the key if a value for the key exists. +func (m *Map) Value(k Key) (string, bool) { + if m == nil { + return "", false + } + v, ok := m.m[k] + return v.value, ok +} + +func (m *Map) String() string { + if m == nil { + return "nil" + } + keys := make([]Key, 0, len(m.m)) + for k := range m.m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) + + var buffer bytes.Buffer + buffer.WriteString("{ ") + for _, k := range keys { + buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) + } + buffer.WriteString(" }") + return buffer.String() +} + +func (m *Map) insert(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + return + } + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) update(k Key, v string, md metadatas) { + if _, ok := m.m[k]; ok { + m.m[k] = tagContent{value: v, m: md} + } +} + +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} +} + +func (m *Map) delete(k Key) { + delete(m.m, k) +} + +func newMap() *Map { + return &Map{m: make(map[Key]tagContent)} +} + +// Mutator modifies a tag map. +type Mutator interface { + Mutate(t *Map) (*Map, error) +} + +// Insert returns a mutator that inserts a +// value associated with k. If k already exists in the tag map, +// mutator doesn't update the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Insert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.insert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Update returns a mutator that updates the +// value of the tag associated with k with v. If k doesn't +// exists in the tag map, the mutator doesn't insert the value. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.update(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +// Upsert returns a mutator that upserts the +// value of the tag associated with k with v. It inserts the +// value if k doesn't exist already. It mutates the value +// if k already exists. +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. 
+func Upsert(k Key, v string, mds ...Metadata) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + if !checkValue(v) { + return nil, errInvalidValue + } + m.upsert(k, v, createMetadatas(mds...)) + return m, nil + }, + } +} + +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + +// Delete returns a mutator that deletes +// the value associated with k. +func Delete(k Key) Mutator { + return &mutator{ + fn: func(m *Map) (*Map, error) { + m.delete(k) + return m, nil + }, + } +} + +// New returns a new context that contains a tag map +// originated from the incoming context and modified +// with the provided mutators. +func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { + m := newMap() + orig := FromContext(ctx) + if orig != nil { + for k, v := range orig.m { + if !checkKeyName(k.Name()) { + return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) + } + if !checkValue(v.value) { + return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) + } + m.insert(k, v.value, v.m) + } + } + var err error + for _, mod := range mutator { + m, err = mod.Mutate(m) + if err != nil { + return ctx, err + } + } + return NewContext(ctx, m), nil +} + +// Do is similar to pprof.Do: a convenience for installing the tags +// from the context as Go profiler labels. This allows you to +// correlated runtime profiling with stats. +// +// It converts the key/values from the given map to Go profiler labels +// and calls pprof.Do. +// +// Do is going to do nothing if your Go version is below 1.9. +func Do(ctx context.Context, f func(ctx context.Context)) { + do(ctx, f) +} + +type mutator struct { + fn func(t *Map) (*Map, error) +} + +func (m *mutator) Mutate(t *Map) (*Map, error) { + return m.fn(t) +} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go new file mode 100644 index 000000000..f8b582761 --- /dev/null +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -0,0 +1,239 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "encoding/binary" + "fmt" +) + +// KeyType defines the types of keys allowed. Currently only keyTypeString is +// supported. +type keyType byte + +const ( + keyTypeString keyType = iota + keyTypeInt64 + keyTypeTrue + keyTypeFalse + + tagsVersionID = byte(0) +) + +type encoderGRPC struct { + buf []byte + writeIdx, readIdx int +} + +// writeKeyString writes the fieldID '0' followed by the key string and value +// string. 
+func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. 
+func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(byte(tagsVersionID)) + for k, v := range m.m { + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 000000000..6571a583e --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. 
+func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 000000000..b34d95e34 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v.value) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 000000000..83adbce56 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,23 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 000000000..0939fc674 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
+ validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 000000000..0c54492a2 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,119 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. +) + +// Link represents a reference from one span to another span. +type Link struct { + TraceID TraceID + SpanID SpanID + Type LinkType + // Attributes is a set of attributes on the link. 
+ Attributes map[string]interface{} +} + +// MessageEventType specifies the type of message event. +type MessageEventType int32 + +// MessageEventType values. +const ( + MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. + MessageEventTypeSent // Indicates a sent RPC message. + MessageEventTypeRecv // Indicates a received RPC message. +) + +// MessageEvent represents an event describing a message sent or received on the network. +type MessageEvent struct { + Time time.Time + EventType MessageEventType + MessageID int64 + UncompressedByteSize int64 + CompressedByteSize int64 +} + +// Status is the status of a Span. +type Status struct { + // Code is a status code. Zero indicates success. + // + // If Code will be propagated to Google APIs, it ideally should be a value from + // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . + Code int32 + Message string +} diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go new file mode 100644 index 000000000..775f8274f --- /dev/null +++ b/vendor/go.opencensus.io/trace/config.go @@ -0,0 +1,86 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + + "go.opencensus.io/trace/internal" +) + +// Config represents the global tracing configuration. +type Config struct { + // DefaultSampler is the default sampler used when creating new spans. + DefaultSampler Sampler + + // IDGenerator is for internal use only. + IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int +} + +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + +// ApplyConfig applies changes to the global tracing configuration. +// +// Fields not provided in the given config are going to be preserved. 
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 000000000..04b1ee4f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,53 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + + +Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 000000000..ffc264f23 --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 000000000..e0d9a4b99 --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. 
+ Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 000000000..7e808d8f3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 000000000..3f80a3368 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 000000000..1eb190a96 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
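// Usage sketch (illustrative; not part of the vendored OpenCensus sources): the
// Exporter interface and RegisterExporter defined in export.go above are wired
// up roughly as follows. printExporter and its output format are assumptions.
//
//    package main
//
//    import (
//        "fmt"
//
//        "go.opencensus.io/trace"
//    )
//
//    // printExporter prints each sampled span after it ends. ExportSpan should
//    // return quickly and must not modify the SpanData it receives.
//    type printExporter struct{}
//
//    func (printExporter) ExportSpan(sd *trace.SpanData) {
//        fmt.Printf("span %q trace=%s span=%s took %s\n",
//            sd.Name, sd.TraceID, sd.SpanID, sd.EndTime.Sub(sd.StartTime))
//    }
//
//    func main() {
//        trace.RegisterExporter(printExporter{})
//        // Spans created after this point are delivered to ExportSpan once
//        // they end, subject to the configured sampler.
//    }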
+ +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. +func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 000000000..71c10f9e3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
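// Usage sketch (illustrative; not part of the vendored OpenCensus sources): a
// round trip through Binary and FromBinary above, using the example bytes from
// the format description. The concrete trace and span IDs are assumptions.
//
//    package main
//
//    import (
//        "fmt"
//
//        "go.opencensus.io/trace"
//        "go.opencensus.io/trace/propagation"
//    )
//
//    func main() {
//        sc := trace.SpanContext{
//            TraceID:      trace.TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
//            SpanID:       trace.SpanID{97, 98, 99, 100, 101, 102, 103, 104},
//            TraceOptions: trace.TraceOptions(1), // sampled
//        }
//        wire := propagation.Binary(sc) // the 29-byte binary form shown above
//        got, ok := propagation.FromBinary(wire)
//        fmt.Println(ok, got == sc) // true true
//    }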
+ +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 000000000..fbabad34c --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. 
+func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. +// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 000000000..c442d9902 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,306 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for span := range s.active { + out = append(out, span.makeSpanData()) + } + return out +} + +// ReportSpansByError returns a sample of error spans. 
+// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. +func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[*Span]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. 
+func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[*Span]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span *Span) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span *Span, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 000000000..ec60effd1 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 000000000..38ead7bf0 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,598 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type Span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. 
+func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return context.WithValue(parent, contextKey{}, s) +} + +// All available span kinds. Span kind must be either one of these values. +const ( + SpanKindUnspecified = iota + SpanKindServer + SpanKindClient +) + +// StartOptions contains options concerning how a span is started. +type StartOptions struct { + // Sampler to consult for this Span. If provided, it is always consulted. + // + // If not provided, then the behavior differs based on whether + // the parent of this Span is remote, local, or there is no parent. + // In the case of a remote parent or no parent, the + // default sampler (see Config) will be consulted. Otherwise, + // when there is a non-remote parent, no new sampling decision will be made: + // we will preserve the sampling of the parent. + Sampler Sampler + + // SpanKind represents the kind of a span. If none is set, + // SpanKindUnspecified is used. + SpanKind int +} + +// StartOption apply changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind makes new spans to be created with the given kind. +func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler makes new spans to be be created with a custom sampler. +// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + +// StartSpan starts a new child span of the current span in the context. If +// there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := FromContext(ctx); p != nil { + p.addChild() + parent = p.spanContext + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. 
+// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { + span := &Span{} + span.spanContext = parent + + cfg := config.Load().(*Config) + + if !hasParent { + span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + span.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: span.spanContext.TraceID, + SpanID: span.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !span.spanContext.IsSampled() { + return span + } + + span.data = &SpanData{ + SpanContext: span.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + span.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + span.spanStore = ss + ss.add(span) + } + } + + return span +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *Span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +// copyAttributes copies a slice of Attributes into a map. +func copyAttributes(m map[string]interface{}, attributes []Attribute) { + for _, a := range attributes { + m[a.key] = a.value + } +} + +func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...interface{}) { + now := time.Now() + msg := fmt.Sprintf(format, a...) 
+ var m map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + m = make(map[string]interface{}) + copyAttributes(m, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: msg, + Attributes: m, + }) + s.mu.Unlock() +} + +func (s *Span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var a map[string]interface{} + s.mu.Lock() + if len(attributes) != 0 { + a = make(map[string]interface{}) + copyAttributes(a, attributes) + } + s.annotations.add(Annotation{ + Time: now, + Message: str, + Attributes: a, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.lazyPrintfInternal(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +func (s *Span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + gen := &defaultIDGenerator{} + // initialize traceID and spanID generators. 
+ var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand +} + +// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. +func (gen *defaultIDGenerator) NewSpanID() [8]byte { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) + } + var sid [8]byte + binary.LittleEndian.PutUint64(sid[:], id) + return sid +} + +// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. +// mu should be held while this function is called. +func (gen *defaultIDGenerator) NewTraceID() [16]byte { + var tid [16]byte + // Construct the trace ID from two outputs of traceIDRand, with a constant + // added to each half for additional entropy. + gen.Lock() + binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) + binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) + gen.Unlock() + return tid +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 000000000..b7d8aaf28 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,32 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. 
+ return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 000000000..e25419859 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 000000000..2d6c713eb --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. 
+func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). +func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 000000000..2b00ddba0 --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 000000000..1fbd3e976 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. 
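For reference, a minimal usage sketch of the tracestate.New semantics documented above: entries passed to New are validated, placed ahead of any entries copied from the parent, and replace a parent entry that shares the same key. This is an illustrative snippet against the vendored go.opencensus.io/trace/tracestate package, not part of the vendored sources; the keys and values are made up.

package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// Parent Tracestate with two vendor entries (illustrative keys/values).
	parent, err := tracestate.New(nil,
		tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"},
		tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"},
	)
	if err != nil {
		panic(err)
	}

	// Re-adding "congo" drops the parent's copy and puts the new entry first.
	child, err := tracestate.New(parent,
		tracestate.Entry{Key: "congo", Value: "ucfJifl5GOE"},
	)
	if err != nil {
		panic(err)
	}

	for _, e := range child.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
	// Prints, in order: congo=ucfJifl5GOE, then rojo=00f067aa0ba902b7.
}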
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 000000000..fc3116090 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 000000000..aeb73f81a --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,295 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. 
+var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). 
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. + ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 000000000..9d80f1952 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. 
+func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. +func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ 
c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] + xr ^= c.p[0] + return xr, xl +} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go new file mode 100644 index 000000000..213bf204a --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/cipher.go @@ -0,0 +1,99 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. +// +// Blowfish is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package blowfish // import "golang.org/x/crypto/blowfish" + +// The code is a port of Bruce Schneier's C implementation. +// See https://www.schneier.com/blowfish.html. + +import "strconv" + +// The Blowfish block size in bytes. 
+const BlockSize = 8 + +// A Cipher is an instance of Blowfish encryption using a particular key. +type Cipher struct { + p [18]uint32 + s0, s1, s2, s3 [256]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Blowfish key, from 1 to 56 bytes. +func NewCipher(key []byte) (*Cipher, error) { + var result Cipher + if k := len(key); k < 1 || k > 56 { + return nil, KeySizeError(k) + } + initCipher(&result) + ExpandKey(key, &result) + return &result, nil +} + +// NewSaltedCipher creates a returns a Cipher that folds a salt into its key +// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is +// sufficient and desirable. For bcrypt compatibility, the key can be over 56 +// bytes. +func NewSaltedCipher(key, salt []byte) (*Cipher, error) { + if len(salt) == 0 { + return NewCipher(key) + } + var result Cipher + if k := len(key); k < 1 { + return nil, KeySizeError(k) + } + initCipher(&result) + expandKeyWithSalt(key, salt, &result) + return &result, nil +} + +// BlockSize returns the Blowfish block size, 8 bytes. +// It is necessary to satisfy the Block interface in the +// package "crypto/cipher". +func (c *Cipher) BlockSize() int { return BlockSize } + +// Encrypt encrypts the 8-byte buffer src using the key k +// and stores the result in dst. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 000000000..d04077595 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// https://www.schneier.com/code/constants.txt. 
+ +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, + 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go new file mode 100644 index 000000000..ddcbeb6f2 --- /dev/null +++ b/vendor/golang.org/x/crypto/cast5/cast5.go @@ -0,0 +1,533 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cast5 implements CAST5, as defined in RFC 2144. +// +// CAST5 is a legacy cipher and its short block size makes it vulnerable to +// birthday bound attacks (see https://sweet32.info). It should only be used +// where compatibility with legacy systems, not security, is the goal. +// +// Deprecated: any new system should use AES (from crypto/aes, if necessary in +// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). 
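+//
+// Usage sketch (illustrative only; key, src and dst are assumed
+// caller-supplied, with key exactly KeySize = 16 bytes and src, dst each one
+// 8-byte block):
+//
+//	c, err := cast5.NewCipher(key)
+//	if err != nil {
+//		panic(err)
+//	}
+//	c.Encrypt(dst, src) // encrypts one BlockSize (8-byte) block
+//	c.Decrypt(src, dst) // recovers the original block into src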
+package cast5 // import "golang.org/x/crypto/cast5" + +import "errors" + +const BlockSize = 8 +const KeySize = 16 + +type Cipher struct { + masking [16]uint32 + rotate [16]uint8 +} + +func NewCipher(key []byte) (c *Cipher, err error) { + if len(key) != KeySize { + return nil, errors.New("CAST5: keys must be 16 bytes") + } + + c = new(Cipher) + c.keySchedule(key) + return +} + +func (c *Cipher) BlockSize() int { + return BlockSize +} + +func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + + l, r = r, l^f1(r, c.masking[15], c.rotate[15]) + l, r = r, l^f3(r, c.masking[14], c.rotate[14]) + l, r = r, l^f2(r, c.masking[13], c.rotate[13]) + l, r = r, l^f1(r, c.masking[12], c.rotate[12]) + + l, r = r, l^f3(r, c.masking[11], c.rotate[11]) + l, r = r, l^f2(r, c.masking[10], c.rotate[10]) + l, r = r, l^f1(r, c.masking[9], c.rotate[9]) + l, r = r, l^f3(r, c.masking[8], c.rotate[8]) + + l, r = r, l^f2(r, c.masking[7], c.rotate[7]) + l, r = r, l^f1(r, c.masking[6], c.rotate[6]) + l, r = r, l^f3(r, c.masking[5], c.rotate[5]) + l, r = r, l^f2(r, c.masking[4], c.rotate[4]) + + l, r = r, l^f1(r, c.masking[3], c.rotate[3]) + l, r = r, l^f3(r, c.masking[2], c.rotate[2]) + l, r = r, l^f2(r, c.masking[1], c.rotate[1]) + l, r = r, l^f1(r, c.masking[0], c.rotate[0]) + + dst[0] = uint8(r >> 24) + dst[1] = uint8(r >> 16) + dst[2] = uint8(r >> 8) + dst[3] = uint8(r) + dst[4] = uint8(l >> 24) + dst[5] = uint8(l >> 16) + dst[6] = uint8(l >> 8) + dst[7] = uint8(l) +} + +type keyScheduleA [4][7]uint8 +type keyScheduleB [4][5]uint8 + +// keyScheduleRound contains the magic values for a round of the key schedule. +// The keyScheduleA deals with the lines like: +// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] +// Conceptually, both x and z are in the same array, x first. The first +// element describes which word of this array gets written to and the +// second, which word gets read. So, for the line above, it's "4, 0", because +// it's writing to the first word of z, which, being after x, is word 4, and +// reading from the first word of x: word 0. +// +// Next are the indexes into the S-boxes. Now the array is treated as bytes. So +// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear +// that it's z that we're indexing. +// +// keyScheduleB deals with lines like: +// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] +// "K1" is ignored because key words are always written in order. So the five +// elements are the S-box indexes. They use the same form as in keyScheduleA, +// above. + +type keyScheduleRound struct{} +type keySchedule []keyScheduleRound + +var schedule = []struct { + a keyScheduleA + b keyScheduleB +}{ + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, + {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, + {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, + {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {3, 2, 0xc, 0xd, 8}, + {1, 0, 0xe, 0xf, 0xd}, + {7, 6, 8, 9, 3}, + {5, 4, 0xa, 0xb, 7}, + }, + }, + { + keyScheduleA{ + {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, + {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, + {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, + {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, + }, + keyScheduleB{ + {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, + {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, + {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, + {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, + }, + }, + { + keyScheduleA{ + {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, + {1, 4, 0, 2, 1, 3, 16 + 2}, + {2, 5, 7, 6, 5, 4, 16 + 1}, + {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, + }, + keyScheduleB{ + {8, 9, 7, 6, 3}, + {0xa, 0xb, 5, 4, 7}, + {0xc, 0xd, 3, 2, 8}, + {0xe, 0xf, 1, 0, 0xd}, + }, + }, +} + +func (c *Cipher) keySchedule(in []byte) { + var t [8]uint32 + var k [32]uint32 + + for i := 0; i < 4; i++ { + j := i * 4 + t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) + } + + x := []byte{6, 7, 4, 5} + ki := 0 + + for half := 0; half < 2; half++ { + for _, round := range schedule { + for j := 0; j < 4; j++ { + var a [7]uint8 + copy(a[:], round.a[j][:]) + w := t[a[1]] + w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] + w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] + w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] + w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] + w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] + t[a[0]] = w + } + + for j := 0; j < 4; j++ { + var b [5]uint8 + copy(b[:], round.b[j][:]) + w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] + w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] + w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] + w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] + w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] + k[ki] = w + ki++ + } + } + } + + for i := 0; i < 16; i++ { + c.masking[i] = k[i] + c.rotate[i] = uint8(k[16+i] & 0x1f) + } +} + +// These are the three 'f' functions. See RFC 2144, section 2.2. 
+func f1(d, m uint32, r uint8) uint32 { + t := m + d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] +} + +func f2(d, m uint32, r uint8) uint32 { + t := m ^ d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] +} + +func f3(d, m uint32, r uint8) uint32 { + t := m - d + I := (t << r) | (t >> (32 - r)) + return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] +} + +var sBox = [8][256]uint32{ + { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, + }, + { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, + }, + { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, + }, + { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, + }, + { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, + }, + { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, + }, + { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, + }, + { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, + }, +} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 000000000..b3f74162f --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 000000000..ee7b4bd5f --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 000000000..cd793a5b5 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 000000000..75f24babb --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have an implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
+func load4(in []byte) int64 {
+	return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := (load3(src[29:]) & 0x7fffff) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+// Then 0<y<1.
+//
+// Write r=h-pq.
+// Have 0<=r<=p-1=2^255-20.
+// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+// Write x=r+19(2^-255)r+y.
+// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
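+//
+// The *19 and *38 factors below implement the reduction modulo
+// p = 2^255 - 19: since 2^255 = 19 (mod p), any partial product whose
+// combined limb weight reaches 2^255 wraps around multiplied by 19. For
+// example, f1*g9 carries weight
+//
+//	2^26 * 2^230 = 2^256 = 2 * 2^255 = 2 * 19 = 38  (mod p)
+//
+// which is why f1g9_38 is folded into h0; the doubling comes from the
+// alternating 26/25-bit limb widths.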
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 000000000..da9b10d9c --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
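An illustrative sketch, not from the vendored sources: ScalarMult above and ScalarBaseMult, whose body follows, are the package's only exported entry points, and they are typically combined for an X25519-style Diffie-Hellman exchange as below. The variable names are hypothetical and error handling is reduced to panics for brevity; scalar clamping happens inside scalarMult, so raw random bytes are acceptable as private scalars.

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Hypothetical key agreement between two parties, for illustration only.
	var alicePriv, alicePub, bobPriv, bobPub [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Each party derives its public key from its private scalar.
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Combining one party's private scalar with the other's public key
	// yields the same shared value on both sides.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println("shared secrets match:", bytes.Equal(aliceShared[:], bobShared[:]))
}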
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 000000000..390816106 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 000000000..e0ac30c70 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,SI,CX + ANDQ DX,SI + SHLQ $13,R8,R9 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R10,R11 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
000000000..5822bd533 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
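An illustrative sketch, not from the vendored sources: unpack and pack, documented here, convert between the 32-byte little-endian encoding and five 51-bit limbs, so the represented value is r[0] + r[1]*2^51 + r[2]*2^102 + r[3]*2^153 + r[4]*2^204. The standalone snippet below reproduces that limb split with math/big and checks the round trip; it does not call the package's unexported helpers, and the sample value is arbitrary.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	mask := new(big.Int).Sub(new(big.Int).Lsh(one, 51), one) // 2^51 - 1

	// Any value below 2^255 splits into five 51-bit limbs, least significant first.
	// (SetString cannot fail on this fixed literal, so the ok flag is ignored.)
	n, _ := new(big.Int).SetString("31415926535897932384626433832795028841971693993751058209749445923", 10)

	var limbs [5]*big.Int
	t := new(big.Int).Set(n)
	for i := range limbs {
		limbs[i] = new(big.Int).And(t, mask) // low 51 bits
		t = new(big.Int).Rsh(t, 51)          // shift to the next limb
	}

	// Recombining limbs[i]*2^(51*i) gives the original value back, which is
	// exactly the relationship that unpack and pack maintain.
	sum := new(big.Int)
	for i := 4; i >= 0; i-- {
		sum.Lsh(sum, 51)
		sum.Add(sum, limbs[i])
	}
	fmt.Println("round trip ok:", sum.Cmp(n) == 0)
}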
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 000000000..1f76d1a3f --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R8,R9 + ANDQ SI,R8 + SHLQ $13,R10,R11 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BX,BP + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 000000000..07511a45a --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,CX,R8 + ANDQ SI,CX + SHLQ $13,R9,R10 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R11,R12 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R13,R14 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,R15,BX + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 000000000..c7f8c7e64 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,222 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// In Go 1.13, the ed25519 package was promoted to the standard library as +// crypto/ed25519, and this package became a wrapper for the standard library one. +// +// +build !go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. 
+ PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Seed returns the private key seed corresponding to priv. It is provided for +// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds +// in this package. +func (priv PrivateKey) Seed() []byte { + seed := make([]byte, SeedSize) + copy(seed, priv[:32]) + return seed +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + if rand == nil { + rand = cryptorand.Reader + } + + seed := make([]byte, SeedSize) + if _, err := io.ReadFull(rand, seed); err != nil { + return nil, nil, err + } + + privateKey := NewKeyFromSeed(seed) + publicKey := make([]byte, PublicKeySize) + copy(publicKey, privateKey[32:]) + + return publicKey, privateKey, nil +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + if l := len(seed); l != SeedSize { + panic("ed25519: bad seed length: " + strconv.Itoa(l)) + } + + digest := sha512.Sum512(seed) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + privateKey := make([]byte, PrivateKeySize) + copy(privateKey, seed) + copy(privateKey[32:], publicKeyBytes[:]) + + return privateKey +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. 
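An illustrative sketch, not from the vendored sources: the exported GenerateKey, Sign, and Verify functions defined in this file are usually exercised together as shown below. The message text is arbitrary and errors are reduced to a panic for brevity.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Generate a fresh key pair from the system's CSPRNG.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("attack at dawn")
	sig := ed25519.Sign(priv, msg)

	// Verify succeeds for the original message and fails for a modified one.
	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
	fmt.Println("tampered message valid:", ed25519.Verify(pub, []byte("attack at dusk"), sig))
}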
+func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. + if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go new file mode 100644 index 000000000..d1448d8d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go @@ -0,0 +1,73 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. 
This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 000000000..e39f086c1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, 
+ { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 000000000..fd03c252a --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s new file mode 100644 index 000000000..b3a16ef75 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s @@ -0,0 +1,308 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 +// +build !gccgo,!appengine + +#include "textflag.h" + +#define NUM_ROUNDS 10 + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD dst+0(FP), R1 + MOVD src+24(FP), R2 + MOVD src_len+32(FP), R3 + MOVD key+48(FP), R4 + MOVD nonce+56(FP), R6 + MOVD counter+64(FP), R7 + + MOVD $·constants(SB), R10 + MOVD $·incRotMatrix(SB), R11 + + MOVW (R7), R20 + + AND $~255, R3, R13 + ADD R2, R13, R12 // R12 for block end + AND $255, R3, R13 +loop: + MOVD $NUM_ROUNDS, R21 + VLD1 (R11), [V30.S4, V31.S4] + + // load contants + // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + WORD $0x4D60E940 + + // load keys + // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] + WORD $0x4DFFE884 + // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] + WORD $0x4DFFE888 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V12.S4] + WORD $0x4D40C8EC + + // VLD3R (R6), [V13.S4, V14.S4, V15.S4] + WORD $0x4D40E8CD + + // update counter + VADD V30.S4, V12.S4, V12.S4 + +chacha: + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) + VADD V8.S4, V12.S4, V8.S4 + VADD V9.S4, V13.S4, V9.S4 + VADD V10.S4, V14.S4, V10.S4 + VADD V11.S4, V15.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $12, V16.S4, V4.S4 + VSHL $12, V17.S4, V5.S4 + VSHL $12, V18.S4, V6.S4 + VSHL $12, V19.S4, V7.S4 + VSRI $20, V16.S4, V4.S4 + VSRI $20, V17.S4, V5.S4 + VSRI $20, V18.S4, V6.S4 + VSRI $20, V19.S4, V7.S4 + + // V0..V3 += V4..V7 + // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) + VADD V0.S4, V4.S4, V0.S4 + VADD V1.S4, V5.S4, V1.S4 + VADD V2.S4, V6.S4, V2.S4 + VADD V3.S4, V7.S4, V3.S4 + VEOR V12.B16, V0.B16, V12.B16 + VEOR V13.B16, V1.B16, V13.B16 + VEOR V14.B16, V2.B16, 
V14.B16 + VEOR V15.B16, V3.B16, V15.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V8..V11 += V12..V15 + // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) + VADD V12.S4, V8.S4, V8.S4 + VADD V13.S4, V9.S4, V9.S4 + VADD V14.S4, V10.S4, V10.S4 + VADD V15.S4, V11.S4, V11.S4 + VEOR V8.B16, V4.B16, V16.B16 + VEOR V9.B16, V5.B16, V17.B16 + VEOR V10.B16, V6.B16, V18.B16 + VEOR V11.B16, V7.B16, V19.B16 + VSHL $7, V16.S4, V4.S4 + VSHL $7, V17.S4, V5.S4 + VSHL $7, V18.S4, V6.S4 + VSHL $7, V19.S4, V7.S4 + VSRI $25, V16.S4, V4.S4 + VSRI $25, V17.S4, V5.S4 + VSRI $25, V18.S4, V6.S4 + VSRI $25, V19.S4, V7.S4 + + // V0..V3 += V5..V7, V4 + // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) + VADD V0.S4, V5.S4, V0.S4 + VADD V1.S4, V6.S4, V1.S4 + VADD V2.S4, V7.S4, V2.S4 + VADD V3.S4, V4.S4, V3.S4 + VEOR V15.B16, V0.B16, V15.B16 + VEOR V12.B16, V1.B16, V12.B16 + VEOR V13.B16, V2.B16, V13.B16 + VEOR V14.B16, V3.B16, V14.B16 + VREV32 V12.H8, V12.H8 + VREV32 V13.H8, V13.H8 + VREV32 V14.H8, V14.H8 + VREV32 V15.H8, V15.H8 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 12) + // ... + VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $12, V16.S4, V5.S4 + VSHL $12, V17.S4, V6.S4 + VSHL $12, V18.S4, V7.S4 + VSHL $12, V19.S4, V4.S4 + VSRI $20, V16.S4, V5.S4 + VSRI $20, V17.S4, V6.S4 + VSRI $20, V18.S4, V7.S4 + VSRI $20, V19.S4, V4.S4 + + // V0 += V5; V15 <<<= ((V0 XOR V15), 8) + // ... + VADD V5.S4, V0.S4, V0.S4 + VADD V6.S4, V1.S4, V1.S4 + VADD V7.S4, V2.S4, V2.S4 + VADD V4.S4, V3.S4, V3.S4 + VEOR V0.B16, V15.B16, V15.B16 + VEOR V1.B16, V12.B16, V12.B16 + VEOR V2.B16, V13.B16, V13.B16 + VEOR V3.B16, V14.B16, V14.B16 + VTBL V31.B16, [V12.B16], V12.B16 + VTBL V31.B16, [V13.B16], V13.B16 + VTBL V31.B16, [V14.B16], V14.B16 + VTBL V31.B16, [V15.B16], V15.B16 + + // V10 += V15; V5 <<<= ((V10 XOR V5), 7) + // ... 
+ VADD V15.S4, V10.S4, V10.S4 + VADD V12.S4, V11.S4, V11.S4 + VADD V13.S4, V8.S4, V8.S4 + VADD V14.S4, V9.S4, V9.S4 + VEOR V10.B16, V5.B16, V16.B16 + VEOR V11.B16, V6.B16, V17.B16 + VEOR V8.B16, V7.B16, V18.B16 + VEOR V9.B16, V4.B16, V19.B16 + VSHL $7, V16.S4, V5.S4 + VSHL $7, V17.S4, V6.S4 + VSHL $7, V18.S4, V7.S4 + VSHL $7, V19.S4, V4.S4 + VSRI $25, V16.S4, V5.S4 + VSRI $25, V17.S4, V6.S4 + VSRI $25, V18.S4, V7.S4 + VSRI $25, V19.S4, V4.S4 + + SUB $1, R21 + CBNZ R21, chacha + + // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] + WORD $0x4D60E950 + + // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] + WORD $0x4DFFE894 + VADD V30.S4, V12.S4, V12.S4 + VADD V16.S4, V0.S4, V0.S4 + VADD V17.S4, V1.S4, V1.S4 + VADD V18.S4, V2.S4, V2.S4 + VADD V19.S4, V3.S4, V3.S4 + // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] + WORD $0x4DFFE898 + // restore R4 + SUB $32, R4 + + // load counter + nonce + // VLD1R (R7), [V28.S4] + WORD $0x4D40C8FC + // VLD3R (R6), [V29.S4, V30.S4, V31.S4] + WORD $0x4D40E8DD + + VADD V20.S4, V4.S4, V4.S4 + VADD V21.S4, V5.S4, V5.S4 + VADD V22.S4, V6.S4, V6.S4 + VADD V23.S4, V7.S4, V7.S4 + VADD V24.S4, V8.S4, V8.S4 + VADD V25.S4, V9.S4, V9.S4 + VADD V26.S4, V10.S4, V10.S4 + VADD V27.S4, V11.S4, V11.S4 + VADD V28.S4, V12.S4, V12.S4 + VADD V29.S4, V13.S4, V13.S4 + VADD V30.S4, V14.S4, V14.S4 + VADD V31.S4, V15.S4, V15.S4 + + VZIP1 V1.S4, V0.S4, V16.S4 + VZIP2 V1.S4, V0.S4, V17.S4 + VZIP1 V3.S4, V2.S4, V18.S4 + VZIP2 V3.S4, V2.S4, V19.S4 + VZIP1 V5.S4, V4.S4, V20.S4 + VZIP2 V5.S4, V4.S4, V21.S4 + VZIP1 V7.S4, V6.S4, V22.S4 + VZIP2 V7.S4, V6.S4, V23.S4 + VZIP1 V9.S4, V8.S4, V24.S4 + VZIP2 V9.S4, V8.S4, V25.S4 + VZIP1 V11.S4, V10.S4, V26.S4 + VZIP2 V11.S4, V10.S4, V27.S4 + VZIP1 V13.S4, V12.S4, V28.S4 + VZIP2 V13.S4, V12.S4, V29.S4 + VZIP1 V15.S4, V14.S4, V30.S4 + VZIP2 V15.S4, V14.S4, V31.S4 + VZIP1 V18.D2, V16.D2, V0.D2 + VZIP2 V18.D2, V16.D2, V4.D2 + VZIP1 V19.D2, V17.D2, V8.D2 + VZIP2 V19.D2, V17.D2, V12.D2 + VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] + + VZIP1 V22.D2, V20.D2, V1.D2 + VZIP2 V22.D2, V20.D2, V5.D2 + VZIP1 V23.D2, V21.D2, V9.D2 + VZIP2 V23.D2, V21.D2, V13.D2 + VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] + VZIP1 V26.D2, V24.D2, V2.D2 + VZIP2 V26.D2, V24.D2, V6.D2 + VZIP1 V27.D2, V25.D2, V10.D2 + VZIP2 V27.D2, V25.D2, V14.D2 + VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] + VZIP1 V30.D2, V28.D2, V3.D2 + VZIP2 V30.D2, V28.D2, V7.D2 + VZIP1 V31.D2, V29.D2, V11.D2 + VZIP2 V31.D2, V29.D2, V15.D2 + VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] + VEOR V0.B16, V16.B16, V16.B16 + VEOR V1.B16, V17.B16, V17.B16 + VEOR V2.B16, V18.B16, V18.B16 + VEOR V3.B16, V19.B16, V19.B16 + VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) + VEOR V4.B16, V20.B16, V20.B16 + VEOR V5.B16, V21.B16, V21.B16 + VEOR V6.B16, V22.B16, V22.B16 + VEOR V7.B16, V23.B16, V23.B16 + VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) + VEOR V8.B16, V24.B16, V24.B16 + VEOR V9.B16, V25.B16, V25.B16 + VEOR V10.B16, V26.B16, V26.B16 + VEOR V11.B16, V27.B16, V27.B16 + VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) + VEOR V12.B16, V28.B16, V28.B16 + VEOR V13.B16, V29.B16, V29.B16 + VEOR V14.B16, V30.B16, V30.B16 + VEOR V15.B16, V31.B16, V31.B16 + VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) + + ADD $4, R20 + MOVW R20, (R7) // update counter + + CMP R2, R12 + BGT loop + + RET + + +DATA ·constants+0x00(SB)/4, $0x61707865 +DATA ·constants+0x04(SB)/4, $0x3320646e +DATA ·constants+0x08(SB)/4, $0x79622d32 +DATA ·constants+0x0c(SB)/4, $0x6b206574 +GLOBL ·constants(SB), 
NOPTR|RODATA, $32 + +DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 +DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 +DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 +DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 +DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 +DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 +DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B +DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F +GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s new file mode 100644 index 000000000..cde3fc989 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s @@ -0,0 +1,668 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on CRYPTOGAMS code with the following comment: +// # ==================================================================== +// # Written by Andy Polyakov for the OpenSSL +// # project. The module is, however, dual licensed under OpenSSL and +// # CRYPTOGAMS licenses depending on where you obtain it. For further +// # details see http://www.openssl.org/~appro/cryptogams/. +// # ==================================================================== + +// Original code can be found at the link below: +// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91e5c39ca79126a4a876d5d8ff + +// There are some differences between CRYPTOGAMS code and this one. The round +// loop for "_int" isn't the same as the original. Some adjustments were +// necessary because there are less vector registers available. For example, some +// X variables (r12, r13, r14, and r15) share the same register used by the +// counter. The original code uses ctr to name the counter. Here we use CNT +// because golang uses CTR as the counter register name. 
+ +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +#define OUT R3 +#define INP R4 +#define LEN R5 +#define KEY R6 +#define CNT R7 + +#define TEMP R8 + +#define X0 R11 +#define X1 R12 +#define X2 R14 +#define X3 R15 +#define X4 R16 +#define X5 R17 +#define X6 R18 +#define X7 R19 +#define X8 R20 +#define X9 R21 +#define X10 R22 +#define X11 R23 +#define X12 R24 +#define X13 R25 +#define X14 R26 +#define X15 R27 + +#define CON0 X0 +#define CON1 X1 +#define CON2 X2 +#define CON3 X3 + +#define KEY0 X4 +#define KEY1 X5 +#define KEY2 X6 +#define KEY3 X7 +#define KEY4 X8 +#define KEY5 X9 +#define KEY6 X10 +#define KEY7 X11 + +#define CNT0 X12 +#define CNT1 X13 +#define CNT2 X14 +#define CNT3 X15 + +#define TMP0 R9 +#define TMP1 R10 +#define TMP2 R28 +#define TMP3 R29 + +#define CONSTS R8 + +#define A0 V0 +#define B0 V1 +#define C0 V2 +#define D0 V3 +#define A1 V4 +#define B1 V5 +#define C1 V6 +#define D1 V7 +#define A2 V8 +#define B2 V9 +#define C2 V10 +#define D2 V11 +#define T0 V12 +#define T1 V13 +#define T2 V14 + +#define K0 V15 +#define K1 V16 +#define K2 V17 +#define K3 V18 +#define K4 V19 +#define K5 V20 + +#define FOUR V21 +#define SIXTEEN V22 +#define TWENTY4 V23 +#define TWENTY V24 +#define TWELVE V25 +#define TWENTY5 V26 +#define SEVEN V27 + +#define INPPERM V28 +#define OUTPERM V29 +#define OUTMASK V30 + +#define DD0 V31 +#define DD1 SEVEN +#define DD2 T0 +#define DD3 T1 +#define DD4 T2 + +DATA ·consts+0x00(SB)/8, $0x3320646e61707865 +DATA ·consts+0x08(SB)/8, $0x6b20657479622d32 +DATA ·consts+0x10(SB)/8, $0x0000000000000001 +DATA ·consts+0x18(SB)/8, $0x0000000000000000 +DATA ·consts+0x20(SB)/8, $0x0000000000000004 +DATA ·consts+0x28(SB)/8, $0x0000000000000000 +DATA ·consts+0x30(SB)/8, $0x0a0b08090e0f0c0d +DATA ·consts+0x38(SB)/8, $0x0203000106070405 +DATA ·consts+0x40(SB)/8, $0x090a0b080d0e0f0c +DATA ·consts+0x48(SB)/8, $0x0102030005060704 +GLOBL ·consts(SB), RODATA, $80 + +//func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[32]byte, counter *[16]byte) +TEXT ·chaCha20_ctr32_vmx(SB),NOSPLIT|NOFRAME,$0 + // Load the arguments inside the registers + MOVD out+0(FP), OUT + MOVD inp+8(FP), INP + MOVD len+16(FP), LEN + MOVD key+24(FP), KEY + MOVD counter+32(FP), CNT + + MOVD $·consts(SB), CONSTS // point to consts addr + + MOVD $16, X0 + MOVD $32, X1 + MOVD $48, X2 + MOVD $64, X3 + MOVD $31, X4 + MOVD $15, X5 + + // Load key + LVX (KEY)(R0), K1 + LVSR (KEY)(R0), T0 + LVX (KEY)(X0), K2 + LVX (KEY)(X4), DD0 + + // Load counter + LVX (CNT)(R0), K3 + LVSR (CNT)(R0), T1 + LVX (CNT)(X5), DD1 + + // Load constants + LVX (CONSTS)(R0), K0 + LVX (CONSTS)(X0), K5 + LVX (CONSTS)(X1), FOUR + LVX (CONSTS)(X2), SIXTEEN + LVX (CONSTS)(X3), TWENTY4 + + // Align key and counter + VPERM K2, K1, T0, K1 + VPERM DD0, K2, T0, K2 + VPERM DD1, K3, T1, K3 + + // Load counter to GPR + MOVWZ 0(CNT), CNT0 + MOVWZ 4(CNT), CNT1 + MOVWZ 8(CNT), CNT2 + MOVWZ 12(CNT), CNT3 + + // Adjust vectors for the initial state + VADDUWM K3, K5, K3 + VADDUWM K3, K5, K4 + VADDUWM K4, K5, K5 + + // Synthesized constants + VSPLTISW $-12, TWENTY + VSPLTISW $12, TWELVE + VSPLTISW $-7, TWENTY5 + + VXOR T0, T0, T0 + VSPLTISW $-1, OUTMASK + LVSR (INP)(R0), INPPERM + LVSL (OUT)(R0), OUTPERM + VPERM OUTMASK, T0, OUTPERM, OUTMASK + +loop_outer_vmx: + // Load constant + MOVD $0x61707865, CON0 + MOVD $0x3320646e, CON1 + MOVD $0x79622d32, CON2 + MOVD $0x6b206574, CON3 + + VOR K0, K0, A0 + VOR K0, K0, A1 + VOR K0, K0, A2 + VOR K1, K1, B0 + + MOVD $10, TEMP + + // Load key to GPR + MOVWZ 0(KEY), X4 + MOVWZ 4(KEY), X5 + MOVWZ 8(KEY), 
X6 + MOVWZ 12(KEY), X7 + VOR K1, K1, B1 + VOR K1, K1, B2 + MOVWZ 16(KEY), X8 + MOVWZ 0(CNT), X12 + MOVWZ 20(KEY), X9 + MOVWZ 4(CNT), X13 + VOR K2, K2, C0 + VOR K2, K2, C1 + MOVWZ 24(KEY), X10 + MOVWZ 8(CNT), X14 + VOR K2, K2, C2 + VOR K3, K3, D0 + MOVWZ 28(KEY), X11 + MOVWZ 12(CNT), X15 + VOR K4, K4, D1 + VOR K5, K5, D2 + + MOVD X4, TMP0 + MOVD X5, TMP1 + MOVD X6, TMP2 + MOVD X7, TMP3 + VSPLTISW $7, SEVEN + + MOVD TEMP, CTR + +loop_vmx: + // CRYPTOGAMS uses a macro to create a loop using perl. This isn't possible + // using assembly macros. Therefore, the macro expansion result was used + // in order to maintain the algorithm efficiency. + // This loop generates three keystream blocks using VMX instructions and, + // in parallel, one keystream block using scalar instructions. + ADD X4, X0, X0 + ADD X5, X1, X1 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VPERM D0, D0, SIXTEEN, D0 + VPERM D1, D1, SIXTEEN, D1 + ROTLW $16, X12, X12 + ROTLW $16, X13, X13 + VPERM D2, D2, SIXTEEN, D2 + VADDUWM C0, D0, C0 + ROTLW $16, X14, X14 + ROTLW $16, X15, X15 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VXOR B2, C2, T2 + VRLW T0, TWELVE, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VRLW T1, TWELVE, B1 + VRLW T2, TWELVE, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VADDUWM A0, B0, A0 + VADDUWM A1, B1, A1 + ROTLW $12, X4, X4 + ROTLW $12, X5, X5 + VADDUWM A2, B2, A2 + VXOR D0, A0, D0 + ROTLW $12, X6, X6 + ROTLW $12, X7, X7 + VXOR D1, A1, D1 + VXOR D2, A2, D2 + ADD X4, X0, X0 + ADD X5, X1, X1 + VPERM D0, D0, TWENTY4, D0 + VPERM D1, D1, TWENTY4, D1 + ADD X6, X2, X2 + ADD X7, X3, X3 + VPERM D2, D2, TWENTY4, D2 + VADDUWM C0, D0, C0 + XOR X0, X12, X12 + XOR X1, X13, X13 + VADDUWM C1, D1, C1 + VADDUWM C2, D2, C2 + XOR X2, X14, X14 + XOR X3, X15, X15 + VXOR B0, C0, T0 + VXOR B1, C1, T1 + ROTLW $8, X12, X12 + ROTLW $8, X13, X13 + VXOR B2, C2, T2 + VRLW T0, SEVEN, B0 + ROTLW $8, X14, X14 + ROTLW $8, X15, X15 + VRLW T1, SEVEN, B1 + VRLW T2, SEVEN, B2 + ADD X12, X8, X8 + ADD X13, X9, X9 + VSLDOI $8, C0, C0, C0 + VSLDOI $8, C1, C1, C1 + ADD X14, X10, X10 + ADD X15, X11, X11 + VSLDOI $8, C2, C2, C2 + VSLDOI $12, B0, B0, B0 + XOR X8, X4, X4 + XOR X9, X5, X5 + VSLDOI $12, B1, B1, B1 + VSLDOI $12, B2, B2, B2 + XOR X10, X6, X6 + XOR X11, X7, X7 + VSLDOI $4, D0, D0, D0 + VSLDOI $4, D1, D1, D1 + ROTLW $7, X4, X4 + ROTLW $7, X5, X5 + VSLDOI $4, D2, D2, D2 + VADDUWM A0, B0, A0 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VXOR D2, A2, D2 + VPERM D0, D0, SIXTEEN, D0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VPERM D1, D1, SIXTEEN, D1 + VPERM D2, D2, SIXTEEN, D2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ROTLW $16, X15, X15 + ROTLW $16, X12, X12 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + ROTLW $16, X13, X13 + ROTLW $16, X14, X14 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VRLW T0, TWELVE, B0 + VRLW T1, TWELVE, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VRLW T2, TWELVE, B2 + VADDUWM A0, B0, A0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VADDUWM A1, B1, A1 + VADDUWM A2, B2, A2 + XOR X8, X7, X7 + XOR X9, X4, X4 + VXOR D0, A0, D0 + VXOR D1, A1, D1 + ROTLW $12, 
X5, X5 + ROTLW $12, X6, X6 + VXOR D2, A2, D2 + VPERM D0, D0, TWENTY4, D0 + ROTLW $12, X7, X7 + ROTLW $12, X4, X4 + VPERM D1, D1, TWENTY4, D1 + VPERM D2, D2, TWENTY4, D2 + ADD X5, X0, X0 + ADD X6, X1, X1 + VADDUWM C0, D0, C0 + VADDUWM C1, D1, C1 + ADD X7, X2, X2 + ADD X4, X3, X3 + VADDUWM C2, D2, C2 + VXOR B0, C0, T0 + XOR X0, X15, X15 + XOR X1, X12, X12 + VXOR B1, C1, T1 + VXOR B2, C2, T2 + XOR X2, X13, X13 + XOR X3, X14, X14 + VRLW T0, SEVEN, B0 + VRLW T1, SEVEN, B1 + ROTLW $8, X15, X15 + ROTLW $8, X12, X12 + VRLW T2, SEVEN, B2 + VSLDOI $8, C0, C0, C0 + ROTLW $8, X13, X13 + ROTLW $8, X14, X14 + VSLDOI $8, C1, C1, C1 + VSLDOI $8, C2, C2, C2 + ADD X15, X10, X10 + ADD X12, X11, X11 + VSLDOI $4, B0, B0, B0 + VSLDOI $4, B1, B1, B1 + ADD X13, X8, X8 + ADD X14, X9, X9 + VSLDOI $4, B2, B2, B2 + VSLDOI $12, D0, D0, D0 + XOR X10, X5, X5 + XOR X11, X6, X6 + VSLDOI $12, D1, D1, D1 + VSLDOI $12, D2, D2, D2 + XOR X8, X7, X7 + XOR X9, X4, X4 + ROTLW $7, X5, X5 + ROTLW $7, X6, X6 + ROTLW $7, X7, X7 + ROTLW $7, X4, X4 + BC 0x10, 0, loop_vmx + + SUB $256, LEN, LEN + + // Accumulate key block + ADD $0x61707865, X0, X0 + ADD $0x3320646e, X1, X1 + ADD $0x79622d32, X2, X2 + ADD $0x6b206574, X3, X3 + ADD TMP0, X4, X4 + ADD TMP1, X5, X5 + ADD TMP2, X6, X6 + ADD TMP3, X7, X7 + MOVWZ 16(KEY), TMP0 + MOVWZ 20(KEY), TMP1 + MOVWZ 24(KEY), TMP2 + MOVWZ 28(KEY), TMP3 + ADD TMP0, X8, X8 + ADD TMP1, X9, X9 + ADD TMP2, X10, X10 + ADD TMP3, X11, X11 + + MOVWZ 12(CNT), TMP0 + MOVWZ 8(CNT), TMP1 + MOVWZ 4(CNT), TMP2 + MOVWZ 0(CNT), TEMP + ADD TMP0, X15, X15 + ADD TMP1, X14, X14 + ADD TMP2, X13, X13 + ADD TEMP, X12, X12 + + // Accumulate key block + VADDUWM A0, K0, A0 + VADDUWM A1, K0, A1 + VADDUWM A2, K0, A2 + VADDUWM B0, K1, B0 + VADDUWM B1, K1, B1 + VADDUWM B2, K1, B2 + VADDUWM C0, K2, C0 + VADDUWM C1, K2, C1 + VADDUWM C2, K2, C2 + VADDUWM D0, K3, D0 + VADDUWM D1, K4, D1 + VADDUWM D2, K5, D2 + + // Increment counter + ADD $4, TEMP, TEMP + MOVW TEMP, 0(CNT) + + VADDUWM K3, FOUR, K3 + VADDUWM K4, FOUR, K4 + VADDUWM K5, FOUR, K5 + + // XOR the input slice (INP) with the keystream, which is stored in GPRs (X0-X3). 
+ + // Load input (aligned or not) + MOVWZ 0(INP), TMP0 + MOVWZ 4(INP), TMP1 + MOVWZ 8(INP), TMP2 + MOVWZ 12(INP), TMP3 + + // XOR with input + XOR TMP0, X0, X0 + XOR TMP1, X1, X1 + XOR TMP2, X2, X2 + XOR TMP3, X3, X3 + MOVWZ 16(INP), TMP0 + MOVWZ 20(INP), TMP1 + MOVWZ 24(INP), TMP2 + MOVWZ 28(INP), TMP3 + XOR TMP0, X4, X4 + XOR TMP1, X5, X5 + XOR TMP2, X6, X6 + XOR TMP3, X7, X7 + MOVWZ 32(INP), TMP0 + MOVWZ 36(INP), TMP1 + MOVWZ 40(INP), TMP2 + MOVWZ 44(INP), TMP3 + XOR TMP0, X8, X8 + XOR TMP1, X9, X9 + XOR TMP2, X10, X10 + XOR TMP3, X11, X11 + MOVWZ 48(INP), TMP0 + MOVWZ 52(INP), TMP1 + MOVWZ 56(INP), TMP2 + MOVWZ 60(INP), TMP3 + XOR TMP0, X12, X12 + XOR TMP1, X13, X13 + XOR TMP2, X14, X14 + XOR TMP3, X15, X15 + + // Store output (aligned or not) + MOVW X0, 0(OUT) + MOVW X1, 4(OUT) + MOVW X2, 8(OUT) + MOVW X3, 12(OUT) + + ADD $64, INP, INP // INP points to the end of the slice for the alignment code below + + MOVW X4, 16(OUT) + MOVD $16, TMP0 + MOVW X5, 20(OUT) + MOVD $32, TMP1 + MOVW X6, 24(OUT) + MOVD $48, TMP2 + MOVW X7, 28(OUT) + MOVD $64, TMP3 + MOVW X8, 32(OUT) + MOVW X9, 36(OUT) + MOVW X10, 40(OUT) + MOVW X11, 44(OUT) + MOVW X12, 48(OUT) + MOVW X13, 52(OUT) + MOVW X14, 56(OUT) + MOVW X15, 60(OUT) + ADD $64, OUT, OUT + + // Load input + LVX (INP)(R0), DD0 + LVX (INP)(TMP0), DD1 + LVX (INP)(TMP1), DD2 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD4 + ADD $64, INP, INP + + VPERM DD1, DD0, INPPERM, DD0 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A0, DD0, A0 // XOR with input + VXOR B0, DD1, B0 + LVX (INP)(TMP0), DD1 // Keep loading input + VXOR C0, DD2, C0 + LVX (INP)(TMP1), DD2 + VXOR D0, DD3, D0 + LVX (INP)(TMP2), DD3 + LVX (INP)(TMP3), DD0 + ADD $64, INP, INP + MOVD $63, TMP3 // 63 is not a typo + VPERM A0, A0, OUTPERM, A0 + VPERM B0, B0, OUTPERM, B0 + VPERM C0, C0, OUTPERM, C0 + VPERM D0, D0, OUTPERM, D0 + + VPERM DD1, DD4, INPPERM, DD4 // Align input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD0, DD3, INPPERM, DD3 + VXOR A1, DD4, A1 + VXOR B1, DD1, B1 + LVX (INP)(TMP0), DD1 // Keep loading + VXOR C1, DD2, C1 + LVX (INP)(TMP1), DD2 + VXOR D1, DD3, D1 + LVX (INP)(TMP2), DD3 + + // Note that the LVX address is always rounded down to the nearest 16-byte + // boundary, and that it always points to at most 15 bytes beyond the end of + // the slice, so we cannot cross a page boundary. + LVX (INP)(TMP3), DD4 // Redundant in aligned case. + ADD $64, INP, INP + VPERM A1, A1, OUTPERM, A1 // Pre-misalign output + VPERM B1, B1, OUTPERM, B1 + VPERM C1, C1, OUTPERM, C1 + VPERM D1, D1, OUTPERM, D1 + + VPERM DD1, DD0, INPPERM, DD0 // Align Input + VPERM DD2, DD1, INPPERM, DD1 + VPERM DD3, DD2, INPPERM, DD2 + VPERM DD4, DD3, INPPERM, DD3 + VXOR A2, DD0, A2 + VXOR B2, DD1, B2 + VXOR C2, DD2, C2 + VXOR D2, DD3, D2 + VPERM A2, A2, OUTPERM, A2 + VPERM B2, B2, OUTPERM, B2 + VPERM C2, C2, OUTPERM, C2 + VPERM D2, D2, OUTPERM, D2 + + ANDCC $15, OUT, X1 // Is out aligned? 
+ MOVD OUT, X0 + + VSEL A0, B0, OUTMASK, DD0 // Collect pre-misaligned output + VSEL B0, C0, OUTMASK, DD1 + VSEL C0, D0, OUTMASK, DD2 + VSEL D0, A1, OUTMASK, DD3 + VSEL A1, B1, OUTMASK, B0 + VSEL B1, C1, OUTMASK, C0 + VSEL C1, D1, OUTMASK, D0 + VSEL D1, A2, OUTMASK, A1 + VSEL A2, B2, OUTMASK, B1 + VSEL B2, C2, OUTMASK, C1 + VSEL C2, D2, OUTMASK, D1 + + STVX DD0, (OUT+TMP0) + STVX DD1, (OUT+TMP1) + STVX DD2, (OUT+TMP2) + ADD $64, OUT, OUT + STVX DD3, (OUT+R0) + STVX B0, (OUT+TMP0) + STVX C0, (OUT+TMP1) + STVX D0, (OUT+TMP2) + ADD $64, OUT, OUT + STVX A1, (OUT+R0) + STVX B1, (OUT+TMP0) + STVX C1, (OUT+TMP1) + STVX D1, (OUT+TMP2) + ADD $64, OUT, OUT + + BEQ aligned_vmx + + SUB X1, OUT, X2 // in misaligned case edges + MOVD $0, X3 // are written byte-by-byte + +unaligned_tail_vmx: + STVEBX D2, (X2+X3) + ADD $1, X3, X3 + CMPW X3, X1 + BNE unaligned_tail_vmx + SUB X1, X0, X2 + +unaligned_head_vmx: + STVEBX A0, (X2+X1) + CMPW X1, $15 + ADD $1, X1, X1 + BNE unaligned_head_vmx + + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + + JMP done_vmx + +aligned_vmx: + STVX A0, (X0+R0) + CMPU LEN, $255 // done with 256-byte block yet? + BGT loop_outer_vmx + +done_vmx: + RET diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go new file mode 100644 index 000000000..ad74e23ae --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 +// +build !gccgo + +package chacha20 + +const ( + haveAsm = true + bufSize = 256 +) + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + + if len(src) >= bufSize { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } + + if len(src)%bufSize != 0 { + i := len(src) - len(src)%bufSize + c.buf = [bufSize]byte{} + copy(c.buf[:], src[i:]) + xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter) + c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize]) + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go new file mode 100644 index 000000000..6570847f5 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go @@ -0,0 +1,264 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ChaCha20 implements the core ChaCha20 function as specified +// in https://tools.ietf.org/html/rfc7539#section-2.3. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + + "golang.org/x/crypto/internal/subtle" +) + +// assert that *Cipher implements cipher.Stream +var _ cipher.Stream = (*Cipher)(nil) + +// Cipher is a stateful instance of ChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + key [8]uint32 + counter uint32 // incremented after each block + nonce [3]uint32 + buf [bufSize]byte // buffer for unused keystream bytes + len int // number of unused keystream bytes at end of buf +} + +// New creates a new ChaCha20 stream cipher with the given key and nonce. +// The initial counter value is set to 0. 
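As a usage sketch only: chacha20 is an internal package, so ordinary programs cannot import it, but code inside x/crypto would drive the constructor declared below roughly as follows. The byte-to-word conversion mirrors the package-level XORKeyStream helper further down in this file.

// Illustrative only: the import path is internal to x/crypto.
package example

import (
	"encoding/binary"

	"golang.org/x/crypto/internal/chacha20"
)

func encrypt(key *[32]byte, nonce *[12]byte, plaintext []byte) []byte {
	var k [8]uint32
	for i := range k {
		k[i] = binary.LittleEndian.Uint32(key[4*i:])
	}
	var n [3]uint32
	for i := range n {
		n[i] = binary.LittleEndian.Uint32(nonce[4*i:])
	}
	c := chacha20.New(k, n)

	out := make([]byte, len(plaintext))
	// Two calls behave like one call on the concatenation: the Cipher keeps its
	// block counter and any unused keystream bytes between calls.
	half := len(plaintext) / 2
	c.XORKeyStream(out[:half], plaintext[:half])
	c.XORKeyStream(out[half:], plaintext[half:])
	return out
}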
+func New(key [8]uint32, nonce [3]uint32) *Cipher { + return &Cipher{key: key, nonce: nonce} +} + +// ChaCha20 constants spelling "expand 32-byte k" +const ( + j0 uint32 = 0x61707865 + j1 uint32 = 0x3320646e + j2 uint32 = 0x79622d32 + j3 uint32 = 0x6b206574 +) + +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = (d << 16) | (d >> 16) + c += d + b ^= c + b = (b << 12) | (b >> 20) + a += b + d ^= a + d = (d << 8) | (d >> 24) + c += d + b ^= c + b = (b << 7) | (b >> 25) + return a, b, c, d +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + if subtle.InexactOverlap(dst[:len(src)], src) { + panic("chacha20: invalid buffer overlap") + } + + // xor src with buffered keystream first + if s.len != 0 { + buf := s.buf[len(s.buf)-s.len:] + if len(src) < len(buf) { + buf = buf[:len(src)] + } + td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint + for i, b := range buf { + td[i] = ts[i] ^ b + } + s.len -= len(buf) + if s.len != 0 { + return + } + s.buf = [len(s.buf)]byte{} // zero the empty buffer + src = src[len(buf):] + dst = dst[len(buf):] + } + + if len(src) == 0 { + return + } + if haveAsm { + if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 { + panic("chacha20: counter overflow") + } + s.xorKeyStreamAsm(dst, src) + return + } + + // set up a 64-byte buffer to pad out the final block if needed + // (hoisted out of the main loop to avoid spills) + rem := len(src) % 64 // length of final block + fin := len(src) - rem // index of final block + if rem > 0 { + copy(s.buf[len(s.buf)-64:], src[fin:]) + } + + // pre-calculate most of the first round + s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0]) + s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1]) + s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2]) + + n := len(src) + src, dst = src[:n:n], dst[:n:n] // BCE hint + for i := 0; i < n; i += 64 { + // calculate the remainder of the first round + s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter) + + // execute the second round + x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15) + x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12) + x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13) + x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14) + + // execute the remaining 18 rounds + for i := 0; i < 9; i++ { + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + + x4 += s.key[0] + x5 += s.key[1] + x6 += s.key[2] + x7 += s.key[3] + x8 += s.key[4] + x9 += s.key[5] + 
x10 += s.key[6] + x11 += s.key[7] + + x12 += s.counter + x13 += s.nonce[0] + x14 += s.nonce[1] + x15 += s.nonce[2] + + // increment the counter + s.counter += 1 + if s.counter == 0 { + panic("chacha20: counter overflow") + } + + // pad to 64 bytes if needed + in, out := src[i:], dst[i:] + if i == fin { + // src[fin:] has already been copied into s.buf before + // the main loop + in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:] + } + in, out = in[:64], out[:64] // BCE hint + + // XOR the key stream with the source and write out the result + xor(out[0:], in[0:], x0) + xor(out[4:], in[4:], x1) + xor(out[8:], in[8:], x2) + xor(out[12:], in[12:], x3) + xor(out[16:], in[16:], x4) + xor(out[20:], in[20:], x5) + xor(out[24:], in[24:], x6) + xor(out[28:], in[28:], x7) + xor(out[32:], in[32:], x8) + xor(out[36:], in[36:], x9) + xor(out[40:], in[40:], x10) + xor(out[44:], in[44:], x11) + xor(out[48:], in[48:], x12) + xor(out[52:], in[52:], x13) + xor(out[56:], in[56:], x14) + xor(out[60:], in[60:], x15) + } + // copy any trailing bytes out of the buffer and into dst + if rem != 0 { + s.len = 64 - rem + copy(dst[fin:], s.buf[len(s.buf)-64:]) + } +} + +// Advance discards bytes in the key stream until the next 64 byte block +// boundary is reached and updates the counter accordingly. If the key +// stream is already at a block boundary no bytes will be discarded and +// the counter will be unchanged. +func (s *Cipher) Advance() { + s.len -= s.len % 64 + if s.len == 0 { + s.buf = [len(s.buf)]byte{} + } +} + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter contains the raw +// ChaCha20 counter bytes (i.e. block counter followed by nonce). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + s := Cipher{ + key: [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + }, + nonce: [3]uint32{ + binary.LittleEndian.Uint32(counter[4:8]), + binary.LittleEndian.Uint32(counter[8:12]), + binary.LittleEndian.Uint32(counter[12:16]), + }, + counter: binary.LittleEndian.Uint32(counter[0:4]), + } + s.XORKeyStream(out, in) +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a +// nonce. It should only be used as part of the XChaCha20 construction. 
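As background drawn from the XChaCha20 specification rather than from this file, the function defined below is typically combined with New like this (hypothetical sketch, written as if it lived inside this package so that HChaCha20, New and binary are in scope):

// newXChaCha20 derives a sub-key from the key and the first 16 nonce bytes
// with HChaCha20, then runs plain ChaCha20 with the remaining 8 nonce bytes.
func newXChaCha20(key *[32]byte, nonce *[24]byte) *Cipher {
	var k [8]uint32
	for i := range k {
		k[i] = binary.LittleEndian.Uint32(key[4*i:])
	}
	var hNonce [4]uint32
	for i := range hNonce {
		hNonce[i] = binary.LittleEndian.Uint32(nonce[4*i:])
	}
	subKey := HChaCha20(&k, &hNonce) // 256-bit derived key

	// 96-bit nonce for the inner ChaCha20: four zero bytes, then nonce[16:24].
	var n [3]uint32
	n[1] = binary.LittleEndian.Uint32(nonce[16:20])
	n[2] = binary.LittleEndian.Uint32(nonce[20:24])
	return New(subKey, n)
}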
+func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 { + x0, x1, x2, x3 := j0, j1, j2, j3 + x4, x5, x6, x7 := key[0], key[1], key[2], key[3] + x8, x9, x10, x11 := key[4], key[5], key[6], key[7] + x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3] + + for i := 0; i < 10; i++ { + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + var out [8]uint32 + out[0], out[1], out[2], out[3] = x0, x1, x2, x3 + out[4], out[5], out[6], out[7] = x12, x13, x14, x15 + return out +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go new file mode 100644 index 000000000..bf8beba67 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !ppc64le,!arm64,!s390x arm64,!go1.11 gccgo appengine + +package chacha20 + +const ( + bufSize = 64 + haveAsm = false +) + +func (*Cipher) xorKeyStreamAsm(dst, src []byte) { + panic("not implemented") +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go new file mode 100644 index 000000000..638cb5e5d --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package chacha20 + +import "encoding/binary" + +const ( + bufSize = 256 + haveAsm = true +) + +//go:noescape +func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + if len(src) >= bufSize { + chaCha20_ctr32_vmx(&dst[0], &src[0], len(src)-len(src)%bufSize, &c.key, &c.counter) + } + if len(src)%bufSize != 0 { + chaCha20_ctr32_vmx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter) + start := len(src) - len(src)%bufSize + ts, td, tb := src[start:], dst[start:], c.buf[:] + // Unroll loop to XOR 32 bytes per iteration. 
+ for i := 0; i < len(ts)-32; i += 32 { + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + s0 := binary.LittleEndian.Uint64(ts[0:8]) + s1 := binary.LittleEndian.Uint64(ts[8:16]) + s2 := binary.LittleEndian.Uint64(ts[16:24]) + s3 := binary.LittleEndian.Uint64(ts[24:32]) + b0 := binary.LittleEndian.Uint64(tb[0:8]) + b1 := binary.LittleEndian.Uint64(tb[8:16]) + b2 := binary.LittleEndian.Uint64(tb[16:24]) + b3 := binary.LittleEndian.Uint64(tb[24:32]) + binary.LittleEndian.PutUint64(td[0:8], s0^b0) + binary.LittleEndian.PutUint64(td[8:16], s1^b1) + binary.LittleEndian.PutUint64(td[16:24], s2^b2) + binary.LittleEndian.PutUint64(td[24:32], s3^b3) + ts, td, tb = ts[32:], td[32:], tb[32:] + } + td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination + for i, v := range ts { + td[i] = tb[i] ^ v + } + c.len = bufSize - (len(src) % bufSize) + + } + +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go new file mode 100644 index 000000000..aad645b44 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!gccgo,!appengine + +package chacha20 + +import ( + "golang.org/x/sys/cpu" +) + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. +// Implementation in asm_s390x.s. +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) + +func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len) +} + +// EXRL targets, DO NOT CALL! +func mvcSrcToBuf() +func mvcBufToDst() diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s new file mode 100644 index 000000000..57df40446 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s @@ -0,0 +1,260 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!gccgo,!appengine + +#include "go_asm.h" +#include "textflag.h" + +// This is an implementation of the ChaCha20 encryption algorithm as +// specified in RFC 7539. It uses vector instructions to compute +// 4 keystream blocks in parallel (256 bytes) which are then XORed +// with the bytes in the input slice. 
+ +GLOBL ·constants<>(SB), RODATA|NOPTR, $32 +// BSWAP: swap bytes in each 4-byte element +DATA ·constants<>+0x00(SB)/4, $0x03020100 +DATA ·constants<>+0x04(SB)/4, $0x07060504 +DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 +DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c +// J0: [j0, j1, j2, j3] +DATA ·constants<>+0x10(SB)/4, $0x61707865 +DATA ·constants<>+0x14(SB)/4, $0x3320646e +DATA ·constants<>+0x18(SB)/4, $0x79622d32 +DATA ·constants<>+0x1c(SB)/4, $0x6b206574 + +// EXRL targets: +TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0 + MVC $1, (R1), (R8) + RET + +TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0 + MVC $1, (R8), (R9) + RET + +#define BSWAP V5 +#define J0 V6 +#define KEY0 V7 +#define KEY1 V8 +#define NONCE V9 +#define CTR V10 +#define M0 V11 +#define M1 V12 +#define M2 V13 +#define M3 V14 +#define INC V15 +#define X0 V16 +#define X1 V17 +#define X2 V18 +#define X3 V19 +#define X4 V20 +#define X5 V21 +#define X6 V22 +#define X7 V23 +#define X8 V24 +#define X9 V25 +#define X10 V26 +#define X11 V27 +#define X12 V28 +#define X13 V29 +#define X14 V30 +#define X15 V31 + +#define NUM_ROUNDS 20 + +#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $16, a2, a2 \ + VERLLF $16, b2, b2 \ + VERLLF $16, c2, c2 \ + VERLLF $16, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $12, a1, a1 \ + VERLLF $12, b1, b1 \ + VERLLF $12, c1, c1 \ + VERLLF $12, d1, d1 \ + VAF a1, a0, a0 \ + VAF b1, b0, b0 \ + VAF c1, c0, c0 \ + VAF d1, d0, d0 \ + VX a0, a2, a2 \ + VX b0, b2, b2 \ + VX c0, c2, c2 \ + VX d0, d2, d2 \ + VERLLF $8, a2, a2 \ + VERLLF $8, b2, b2 \ + VERLLF $8, c2, c2 \ + VERLLF $8, d2, d2 \ + VAF a2, a3, a3 \ + VAF b2, b3, b3 \ + VAF c2, c3, c3 \ + VAF d2, d3, d3 \ + VX a3, a1, a1 \ + VX b3, b1, b1 \ + VX c3, c1, c1 \ + VX d3, d1, d1 \ + VERLLF $7, a1, a1 \ + VERLLF $7, b1, b1 \ + VERLLF $7, c1, c1 \ + VERLLF $7, d1, d1 + +#define PERMUTE(mask, v0, v1, v2, v3) \ + VPERM v0, v0, mask, v0 \ + VPERM v1, v1, mask, v1 \ + VPERM v2, v2, mask, v2 \ + VPERM v3, v3, mask, v3 + +#define ADDV(x, v0, v1, v2, v3) \ + VAF x, v0, v0 \ + VAF x, v1, v1 \ + VAF x, v2, v2 \ + VAF x, v3, v3 + +#define XORV(off, dst, src, v0, v1, v2, v3) \ + VLM off(src), M0, M3 \ + PERMUTE(BSWAP, v0, v1, v2, v3) \ + VX v0, M0, M0 \ + VX v1, M1, M1 \ + VX v2, M2, M2 \ + VX v3, M3, M3 \ + VSTM M0, M3, off(dst) + +#define SHUFFLE(a, b, c, d, t, u, v, w) \ + VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} + VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} + VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} + VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} + VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} + VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} + VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} + VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} + +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) +TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 + MOVD $·constants<>(SB), R1 + MOVD dst+0(FP), R2 // R2=&dst[0] + LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) + MOVD key+48(FP), R5 // R5=key + MOVD nonce+56(FP), R6 // R6=nonce + MOVD counter+64(FP), R7 // R7=counter + MOVD buf+72(FP), R8 // R8=buf + MOVD len+80(FP), R9 // R9=len + + // load BSWAP and J0 + VLM (R1), BSWAP, J0 + + // set up tail buffer + 
ADD $-1, R4, R12 + MOVBZ R12, R12 + CMPUBEQ R12, $255, aligned + MOVD R4, R1 + AND $~255, R1 + MOVD $(R3)(R1*1), R1 + EXRL $·mvcSrcToBuf(SB), R12 + MOVD $255, R0 + SUB R12, R0 + MOVD R0, (R9) // update len + +aligned: + // setup + MOVD $95, R0 + VLM (R5), KEY0, KEY1 + VLL R0, (R6), NONCE + VZERO M0 + VLEIB $7, $32, M0 + VSRLB M0, NONCE, NONCE + + // initialize counter values + VLREPF (R7), CTR + VZERO INC + VLEIF $1, $1, INC + VLEIF $2, $2, INC + VLEIF $3, $3, INC + VAF INC, CTR, CTR + VREPIF $4, INC + +chacha: + VREPF $0, J0, X0 + VREPF $1, J0, X1 + VREPF $2, J0, X2 + VREPF $3, J0, X3 + VREPF $0, KEY0, X4 + VREPF $1, KEY0, X5 + VREPF $2, KEY0, X6 + VREPF $3, KEY0, X7 + VREPF $0, KEY1, X8 + VREPF $1, KEY1, X9 + VREPF $2, KEY1, X10 + VREPF $3, KEY1, X11 + VLR CTR, X12 + VREPF $1, NONCE, X13 + VREPF $2, NONCE, X14 + VREPF $3, NONCE, X15 + + MOVD $(NUM_ROUNDS/2), R1 + +loop: + ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) + ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) + + ADD $-1, R1 + BNE loop + + // decrement length + ADD $-256, R4 + BLT tail + +continue: + // rearrange vectors + SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) + ADDV(J0, X0, X1, X2, X3) + SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) + ADDV(KEY0, X4, X5, X6, X7) + SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) + ADDV(KEY1, X8, X9, X10, X11) + VAF CTR, X12, X12 + SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) + ADDV(NONCE, X12, X13, X14, X15) + + // increment counters + VAF INC, CTR, CTR + + // xor keystream with plaintext + XORV(0*64, R2, R3, X0, X4, X8, X12) + XORV(1*64, R2, R3, X1, X5, X9, X13) + XORV(2*64, R2, R3, X2, X6, X10, X14) + XORV(3*64, R2, R3, X3, X7, X11, X15) + + // increment pointers + MOVD $256(R2), R2 + MOVD $256(R3), R3 + + CMPBNE R4, $0, chacha + CMPUBEQ R12, $255, return + EXRL $·mvcBufToDst(SB), R12 // len was updated during setup + +return: + VSTEF $0, CTR, (R7) + RET + +tail: + MOVD R2, R9 + MOVD R8, R2 + MOVD R8, R3 + MOVD $0, R4 + JMP continue diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/internal/chacha20/xor.go new file mode 100644 index 000000000..9c5ba0b33 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/chacha20/xor.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found src the LICENSE file. + +package chacha20 + +import ( + "runtime" +) + +// Platforms that have fast unaligned 32-bit little endian accesses. +const unaligned = runtime.GOARCH == "386" || + runtime.GOARCH == "amd64" || + runtime.GOARCH == "arm64" || + runtime.GOARCH == "ppc64le" || + runtime.GOARCH == "s390x" + +// xor reads a little endian uint32 from src, XORs it with u and +// places the result in little endian byte order in dst. +func xor(dst, src []byte, u uint32) { + _, _ = src[3], dst[3] // eliminate bounds checks + if unaligned { + // The compiler should optimize this code into + // 32-bit unaligned little endian loads and stores. + // TODO: delete once the compiler does a reliably + // good job with the generic code below. + // See issue #25111 for more details. 
+ v := uint32(src[0]) + v |= uint32(src[1]) << 8 + v |= uint32(src[2]) << 16 + v |= uint32(src[3]) << 24 + v ^= u + dst[0] = byte(v) + dst[1] = byte(v >> 8) + dst[2] = byte(v >> 16) + dst[3] = byte(v >> 24) + } else { + dst[0] = src[0] ^ byte(u) + dst[1] = src[1] ^ byte(u>>8) + dst[2] = src[2] ^ byte(u>>16) + dst[3] = src[3] ^ byte(u>>24) + } +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go new file mode 100644 index 000000000..f38797bfa --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go @@ -0,0 +1,32 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +import "unsafe" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && + uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. +func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go new file mode 100644 index 000000000..0cc4a8a64 --- /dev/null +++ b/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// Package subtle implements functions that are often useful in cryptographic +// code but require careful thought to use correctly. +package subtle // import "golang.org/x/crypto/internal/subtle" + +// This is the Google App Engine standard variant based on reflect +// because the unsafe package and cgo are disallowed. + +import "reflect" + +// AnyOverlap reports whether x and y share memory at any (not necessarily +// corresponding) index. The memory beyond the slice length is ignored. +func AnyOverlap(x, y []byte) bool { + return len(x) > 0 && len(y) > 0 && + reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && + reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() +} + +// InexactOverlap reports whether x and y share memory at any non-corresponding +// index. The memory beyond the slice length is ignored. Note that x and y can +// have different lengths and still not have any inexact overlap. +// +// InexactOverlap can be used to implement the requirements of the crypto/cipher +// AEAD, Block, BlockMode and Stream interfaces. 
+func InexactOverlap(x, y []byte) bool { + if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { + return false + } + return AnyOverlap(x, y) +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go new file mode 100644 index 000000000..592d18643 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -0,0 +1,219 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "golang.org/x/crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. +// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. 
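A minimal sketch of how the internal/subtle aliasing helpers above are intended to be used. Because the package is internal it can only be imported from inside golang.org/x/crypto itself, and the cipher type below is hypothetical; the point is only the overlap guard that the doc comment describes for Stream/AEAD implementations.

// examplecipher is a hypothetical package living inside golang.org/x/crypto.
package examplecipher

import "golang.org/x/crypto/internal/subtle"

type stream struct{} // cipher state elided

func (s *stream) XORKeyStream(dst, src []byte) {
	// Exact aliasing (dst == src) is fine for in-place use; shifted overlap
	// would clobber source bytes that have not been read yet.
	if subtle.InexactOverlap(dst[:len(src)], src) {
		panic("examplecipher: invalid buffer overlap")
	}
	// ... keystream XOR elided ...
}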
+type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF { + if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. +func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go new file mode 100644 index 000000000..6f07582c3 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. 
+type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. +// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go new file mode 100644 index 000000000..e601e389f --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/canonical_text.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import "hash" + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. 
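A minimal round-trip sketch for the armor package above; the block type, header and message are illustrative values only.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer

	// Encode: headers are optional key/value pairs placed after the BEGIN line.
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Version": "example"})
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, armor"))
	w.Close() // flushes the base64 tail, the =CRC24 line and the END trailer

	// Decode: recovers the type, the headers and a streaming body.
	block, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(block.Body) // CRC is checked at EOF
	fmt.Println(block.Type, string(body)) // PGP MESSAGE hello, armor
}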
+func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + start := 0 + + for i, c := range buf { + switch cth.s { + case 0: + if c == '\r' { + cth.s = 1 + } else if c == '\n' { + cth.h.Write(buf[start:i]) + cth.h.Write(newline) + start = i + 1 + } + case 1: + cth.s = 0 + } + } + + cth.h.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 000000000..73f4fe378 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,122 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. +// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "golang.org/x/crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. 
See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + s.ModInverse(s, priv.P) + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. +func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go new file mode 100644 index 000000000..eb0550b2d --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/errors/errors.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "golang.org/x/crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. 
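A minimal round-trip sketch for the elgamal package above. The group parameters are toy values chosen only so the snippet is self-contained (P is the Mersenne prime 2^127-1, G=2); they are far too small to be secure, and real OpenPGP keys carry properly generated parameters.

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"golang.org/x/crypto/openpgp/elgamal"
)

func main() {
	// Toy parameters for illustration only.
	p, _ := new(big.Int).SetString("170141183460469231731687303715884105727", 10)
	g := big.NewInt(2)

	x, _ := rand.Int(rand.Reader, p) // private exponent
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	// The message must fit in pLen-11 bytes because of the PKCS#1 v1.5 style padding.
	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("hi"))
	if err != nil {
		panic(err)
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	fmt.Println(string(msg), err) // hi <nil>
}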
+type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go new file mode 100644 index 000000000..faa2fb369 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -0,0 +1,693 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto/rsa" + "io" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. +type Key struct { + Entity *Entity + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + SelfSignature *packet.Signature +} + +// A KeyRing provides access to public and private keys. +type KeyRing interface { + // KeysById returns the set of keys that have the given key id. + KeysById(id uint64) []Key + // KeysByIdAndUsage returns the set of keys with the given id + // that also meet the key usage given by requiredUsage. + // The requiredUsage is expressed as the bitwise-OR of + // packet.KeyFlag* values. + KeysByIdUsage(id uint64, requiredUsage byte) []Key + // DecryptionKeys returns all private keys that are valid for + // decryption. + DecryptionKeys() []Key +} + +// primaryIdentity returns the Identity marked as primary or the first identity +// if none are so marked. 
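A short sketch of how callers typically branch on the error types defined above (the keyring reader later in this diff does the same with a type assertion); the classify helper is hypothetical.

package main

import (
	"fmt"

	pgperrors "golang.org/x/crypto/openpgp/errors"
)

// classify is a hypothetical helper illustrating the usual distinction:
// unsupported features are skippable, structural errors are usually fatal.
func classify(err error) string {
	switch err.(type) {
	case pgperrors.UnsupportedError:
		return "valid data using an unimplemented feature"
	case pgperrors.StructuralError:
		return "syntactically invalid data"
	default:
		return "other error"
	}
}

func main() {
	fmt.Println(classify(pgperrors.UnsupportedError("public key type 99")))
	fmt.Println(classify(pgperrors.StructuralError("truncated packet")))
}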
+func (e *Entity) primaryIdentity() *Identity { + var firstIdentity *Identity + for _, ident := range e.Identities { + if firstIdentity == nil { + firstIdentity = ident + } + if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + return ident + } + } + return firstIdentity +} + +// encryptionKey returns the best candidate Key for encrypting a message to the +// given Entity. +func (e *Entity) encryptionKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + // Iterate the keys to find the newest key + var maxTime time.Time + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagEncryptCommunications && + subkey.PublicKey.PubKeyAlgo.CanEncrypt() && + !subkey.Sig.KeyExpired(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { + candidateSubkey = i + maxTime = subkey.Sig.CreationTime + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we don't have any candidate subkeys for encryption and + // the primary key doesn't have any usage metadata then we + // assume that the primary key is ok. Or, if the primary key is + // marked as ok to encrypt to, then we can obviously use it. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && + e.PrimaryKey.PubKeyAlgo.CanEncrypt() && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + // This Entity appears to be signing only. + return Key{}, false +} + +// signingKey return the best candidate Key for signing a message with this +// Entity. +func (e *Entity) signingKey(now time.Time) (Key, bool) { + candidateSubkey := -1 + + for i, subkey := range e.Subkeys { + if subkey.Sig.FlagsValid && + subkey.Sig.FlagSign && + subkey.PublicKey.PubKeyAlgo.CanSign() && + !subkey.Sig.KeyExpired(now) { + candidateSubkey = i + break + } + } + + if candidateSubkey != -1 { + subkey := e.Subkeys[candidateSubkey] + return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true + } + + // If we have no candidate subkey then we assume that it's ok to sign + // with the primary key. + i := e.primaryIdentity() + if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && + !i.SelfSignature.KeyExpired(now) { + return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true + } + + return Key{}, false +} + +// An EntityList contains one or more Entities. +type EntityList []*Entity + +// KeysById returns the set of keys that have the given key id. +func (el EntityList) KeysById(id uint64) (keys []Key) { + for _, e := range el { + if e.PrimaryKey.KeyId == id { + var selfSig *packet.Signature + for _, ident := range e.Identities { + if selfSig == nil { + selfSig = ident.SelfSignature + } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { + selfSig = ident.SelfSignature + break + } + } + keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) + } + + for _, subKey := range e.Subkeys { + if subKey.PublicKey.KeyId == id { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// KeysByIdAndUsage returns the set of keys with the given id that also meet +// the key usage given by requiredUsage. The requiredUsage is expressed as +// the bitwise-OR of packet.KeyFlag* values. 
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { + for _, key := range el.KeysById(id) { + if len(key.Entity.Revocations) > 0 { + continue + } + + if key.SelfSignature.RevocationReason != nil { + continue + } + + if key.SelfSignature.FlagsValid && requiredUsage != 0 { + var usage byte + if key.SelfSignature.FlagCertify { + usage |= packet.KeyFlagCertify + } + if key.SelfSignature.FlagSign { + usage |= packet.KeyFlagSign + } + if key.SelfSignature.FlagEncryptCommunications { + usage |= packet.KeyFlagEncryptCommunications + } + if key.SelfSignature.FlagEncryptStorage { + usage |= packet.KeyFlagEncryptStorage + } + if usage&requiredUsage != requiredUsage { + continue + } + } + + keys = append(keys, key) + } + return +} + +// DecryptionKeys returns all private keys that are valid for decryption. +func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. 
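A minimal sketch of reading an armored keyring with the functions above; "keyring.asc" is a placeholder path to an ASCII-armored public keyring.

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, err := os.Open("keyring.asc")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}
	for _, e := range el {
		fmt.Printf("primary key %016X: %d identities, %d subkeys\n",
			e.PrimaryKey.KeyId, len(e.Identities), len(e.Subkeys))
	}
}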
+func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). + } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. 
+ identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + identity.SelfSignature = sig + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +const defaultRSAKeyBits = 2048 + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. 
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + signingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) + if err != nil { + return nil, err + } + + e := &Entity{ + PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), + Identities: make(map[string]*Identity), + } + isPrimaryId := true + e.Identities[uid.Id] = &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: &packet.Signature{ + CreationTime: creationTime, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return nil, err + } + + // If the user passes in a DefaultHash via packet.Config, + // set the PreferredHash for the SelfSignature. + if config != nil && config.DefaultHash != 0 { + e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} + } + + // Likewise for DefaultCipher. + if config != nil && config.DefaultCipher != 0 { + e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} + } + + e.Subkeys = make([]Subkey, 1) + e.Subkeys[0] = Subkey{ + PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), + PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), + Sig: &packet.Signature{ + CreationTime: creationTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + e.Subkeys[0].PublicKey.IsSubkey = true + e.Subkeys[0].PrivateKey.IsSubkey = true + err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) + if err != nil { + return nil, err + } + return e, nil +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. 
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. +func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go new file mode 100644 index 000000000..e8f0b5caa --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "compress/bzip2" + "compress/flate" + "compress/zlib" + "golang.org/x/crypto/openpgp/errors" + "io" + "strconv" +) + +// Compressed represents a compressed OpenPGP packet. The decompressed contents +// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
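A minimal sketch of generating an entity with NewEntity and exporting its public half as an armored key block; the name and email are placeholder values.

package main

import (
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	// A nil *packet.Config selects the defaults (2048-bit RSA keys, SHA-256 self-signatures).
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	// Export the public part as a "PGP PUBLIC KEY BLOCK".
	w, err := armor.Encode(os.Stdout, openpgp.PublicKeyType, nil)
	if err != nil {
		panic(err)
	}
	if err := e.Serialize(w); err != nil {
		panic(err)
	}
	w.Close()
}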
+type Compressed struct { + Body io.Reader +} + +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression +) + +// CompressionConfig contains compressor configuration settings. +type CompressionConfig struct { + // Level is the compression level to use. It must be set to + // between -1 and 9, with -1 causing the compressor to use the + // default compression level, 0 causing the compressor to use + // no compression and 1 to 9 representing increasing (better, + // slower) compression levels. If Level is less than -1 or + // more then 9, a non-nil error will be returned during + // encryption. See the constants above for convenient common + // settings for Level. + Level int +} + +func (c *Compressed) parse(r io.Reader) error { + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + + switch buf[0] { + case 1: + c.Body = flate.NewReader(r) + case 2: + c.Body, err = zlib.NewReader(r) + case 3: + c.Body = bzip2.NewReader(r) + default: + err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) + } + + return err +} + +// compressedWriterCloser represents the serialized compression stream +// header and the compressor. Its Close() method ensures that both the +// compressor and serialized stream header are closed. Its Write() +// method writes to the compressor. +type compressedWriteCloser struct { + sh io.Closer // Stream Header + c io.WriteCloser // Compressor +} + +func (cwc compressedWriteCloser) Write(p []byte) (int, error) { + return cwc.c.Write(p) +} + +func (cwc compressedWriteCloser) Close() (err error) { + err = cwc.c.Close() + if err != nil { + return err + } + + return cwc.sh.Close() +} + +// SerializeCompressed serializes a compressed data packet to w and +// returns a WriteCloser to which the literal data packets themselves +// can be written and which MUST be closed on completion. If cc is +// nil, sensible defaults will be used to configure the compression +// algorithm. +func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { + compressed, err := serializeStreamHeader(w, packetTypeCompressed) + if err != nil { + return + } + + _, err = compressed.Write([]byte{uint8(algo)}) + if err != nil { + return + } + + level := DefaultCompression + if cc != nil { + level = cc.Level + } + + var compressor io.WriteCloser + switch algo { + case CompressionZIP: + compressor, err = flate.NewWriter(compressed, level) + case CompressionZLIB: + compressor, err = zlib.NewWriterLevel(compressed, level) + default: + s := strconv.Itoa(int(algo)) + err = errors.UnsupportedError("Unsupported compression algorithm: " + s) + } + if err != nil { + return + } + + literaldata = compressedWriteCloser{compressed, compressor} + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go new file mode 100644 index 000000000..c76eecc96 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/config.go @@ -0,0 +1,91 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/rand" + "io" + "time" +) + +// Config collects a number of parameters along with sensible defaults. +// A nil *Config is valid and results in all default values. 
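A minimal sketch of writing a compressed data packet with SerializeCompressed. The nopCloser wrapper and the payload are illustrative; in a real message the bytes written to the returned WriteCloser would themselves be OpenPGP packets (typically literal data).

package main

import (
	"bytes"
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser that
// SerializeCompressed expects for its output.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer

	// CompressionZIP selects DEFLATE; a nil CompressionConfig means the default level.
	w, err := packet.SerializeCompressed(nopCloser{&buf}, packet.CompressionZIP, nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("inner packet bytes")) // placeholder payload
	w.Close()                             // flushes the compressor and finishes the stream header

	fmt.Println(len(buf.Bytes()), "bytes of compressed packet")
}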
+type Config struct { + // Rand provides the source of entropy. + // If nil, the crypto/rand Reader is used. + Rand io.Reader + // DefaultHash is the default hash function to be used. + // If zero, SHA-256 is used. + DefaultHash crypto.Hash + // DefaultCipher is the cipher to be used. + // If zero, AES-128 is used. + DefaultCipher CipherFunction + // Time returns the current time as the number of seconds since the + // epoch. If Time is nil, time.Now is used. + Time func() time.Time + // DefaultCompressionAlgo is the compression algorithm to be + // applied to the plaintext before encryption. If zero, no + // compression is done. + DefaultCompressionAlgo CompressionAlgo + // CompressionConfig configures the compression settings. + CompressionConfig *CompressionConfig + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 000000000..02b372cf3 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,206 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. 
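A minimal sketch showing that a nil *Config really is usable: every accessor checks for a nil receiver and falls back to its documented default.

package main

import (
	"crypto"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	var cfg *packet.Config // nil is valid

	fmt.Println(cfg.Hash() == crypto.SHA256)                  // true
	fmt.Println(cfg.Cipher() == packet.CipherAES128)          // true
	fmt.Println(cfg.Compression() == packet.CompressionNone)  // true
	fmt.Println(cfg.Now().IsZero())                           // false: falls back to time.Now()
}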
+type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 parsedMPI +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } + e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + if err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + k := priv.PrivateKey.(*rsa.PrivateKey) + b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + default: + err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. 
+// If config is nil, sensible defaults will be used. +func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + return writeMPI(w, 8*uint16(len(cipherText)), cipherText) +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + err = writeBig(w, c1) + if err != nil { + return err + } + return writeBig(w, c2) +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go new file mode 100644 index 000000000..1a9ec6e51 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/literal.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. 
+func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.IsBinary = buf[0] == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go new file mode 100644 index 000000000..ce2a33a54 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go @@ -0,0 +1,143 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
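A minimal sketch of writing a literal data packet with SerializeLiteral; the nopCloser wrapper, file name and payload are illustrative values.

package main

import (
	"bytes"
	"fmt"
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser that
// SerializeLiteral expects for its output.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer

	// isBinary=false marks the payload as text; the file name and timestamp are
	// advisory metadata carried in the packet header.
	w, err := packet.SerializeLiteral(nopCloser{&buf}, false, "note.txt", uint32(time.Now().Unix()))
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "hello, literal data")
	w.Close() // finishes the partial-length stream header

	fmt.Println(len(buf.Bytes()), "bytes of literal data packet")
}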
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. If an incorrect key is detected then nil is returned. On +// successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if prefixCopy[blockSize-2] != prefixCopy[blockSize] || + prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { + return nil + } + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 000000000..171350339 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
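A minimal OCFB round-trip sketch using the encrypter and decrypter above with AES-128; the key and plaintext are illustrative values.

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	rand.Read(key)
	block, _ := aes.NewCipher(key)

	// randData must be one block of fresh random bytes. The returned prefix is
	// the encrypted random block plus the two quick-check bytes and is sent
	// ahead of the ciphertext.
	randData := make([]byte, block.BlockSize())
	rand.Read(randData)

	enc, prefix := packet.NewOCFBEncrypter(block, randData, packet.OCFBResync)
	plaintext := []byte("attack at dawn")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// The decrypter consumes the same prefix and returns nil if the quick-check
	// bytes do not match (for example, a wrong key).
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		panic("prefix check failed")
	}
	recovered := make([]byte, len(ciphertext))
	dec.XORKeyStream(recovered, ciphertext)

	fmt.Println(bytes.Equal(recovered, plaintext)) // true
}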
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go new file mode 100644 index 000000000..456d807f2 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "golang.org/x/crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. +type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. 
+func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go new file mode 100644 index 000000000..5af64c542 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -0,0 +1,551 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "golang.org/x/crypto/openpgp/packet" + +import ( + "bufio" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rsa" + "io" + "math/big" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. +func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
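+// The first octet selects the encoding: values below 192 are the length
+// itself, 192..223 begin a two-octet length (up to 8383), 224..254 encode a
+// partial body length of 1 << (octet & 0x1f), and 255 is followed by a
+// four-octet big-endian length.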
+func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + for len(p) > 0 { + for power := uint(14); power < 32; power-- { + l := 1 << power + if len(p) >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(p[:l]) + n += m + if err != nil { + return + } + p = p[l:] + break + } + } + } + return +} + +func (w *partialLengthWriter) Close() error { + w.lengthByte[0] = 0 + _, err := w.w.Write(w.lengthByte[:]) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
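+// For example, the octets 0xCB 0x05 start a new-format literal data packet
+// (0x80|0x40|11 = 0xCB) with a five-byte body, while old-format packets keep
+// the tag in bits 5..2 and the length type in bits 1..0 of the first octet.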
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + var buf [6]byte + var n int + + buf[0] = 0x80 | 0x40 | byte(ptype) + if length < 192 { + buf[1] = byte(length) + n = 2 + } else if length < 8384 { + length -= 192 + buf[1] = 192 + byte(length>>8) + buf[2] = byte(length) + n = 3 + } else { + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 +) + +// peekVersion detects the version of a public key packet about to +// be read. A bufio.Reader at the original position of the io.Reader +// is returned. 
+func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { + bufr = bufio.NewReader(r) + var verBuf []byte + if verBuf, err = bufr.Peek(1); err != nil { + return + } + ver = verBuf[0] + return +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + var version byte + // Detect signature version + if contents, version, err = peekVersion(contents); err != nil { + return + } + if version < 4 { + p = new(SignatureV3) + } else { + p = new(Signature) + } + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + var version byte + if contents, version, err = peekVersion(contents); err != nil { + return + } + isSubkey := tag == packetTypePublicSubkey + if version < 4 { + p = &PublicKeyV3{IsSubkey: isSubkey} + } else { + p = &PublicKey{IsSubkey: isSubkey} + } + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + p = new(SymmetricallyEncrypted) + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. + PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. 
+func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction uint8 + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case Cipher3DES: + return 24 + case CipherCAST5: + return cast5.KeySize + case CipherAES128: + return 16 + case CipherAES192: + return 24 + case CipherAES256: + return 32 + } + return 0 +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + switch cipher { + case Cipher3DES: + return des.BlockSize + case CipherCAST5: + return 8 + case CipherAES128, CipherAES192, CipherAES256: + return 16 + } + return 0 +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + switch cipher { + case Cipher3DES: + block, _ = des.NewTripleDESCipher(key) + case CipherCAST5: + block, _ = cast5.NewCipher(key) + case CipherAES128, CipherAES192, CipherAES256: + block, _ = aes.NewCipher(key) + } + return +} + +// readMPI reads a big integer from r. The bit length returned is the bit +// length that was specified in r. This is preserved so that the integer can be +// reserialized exactly. +func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { + var buf [2]byte + _, err = readFull(r, buf[0:]) + if err != nil { + return + } + bitLength = uint16(buf[0])<<8 | uint16(buf[1]) + numBytes := (int(bitLength) + 7) / 8 + mpi = make([]byte, numBytes) + _, err = readFull(r, mpi) + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. + return +} + +// writeMPI serializes a big integer to w. +func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. + // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. + _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) + if err == nil { + _, err = w.Write(mpiBytes) + } + return +} + +// writeBig serializes a *big.Int to w. +func writeBig(w io.Writer, i *big.Int) error { + return writeMPI(w, uint16(i.BitLen()), i.Bytes()) +} + +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo Represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. 
+type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go new file mode 100644 index 000000000..6f8ec0938 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go @@ -0,0 +1,385 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. + sha1Checksum bool + iv []byte +} + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA or ECDSA. +func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. 
+ switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + + s2kType := buf[0] + + switch s2kType { + case 0: + pk.s2k = nil + pk.Encrypted = false + case 254, 255: + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.Encrypted = true + pk.s2k, err = s2k.Parse(r) + if err != nil { + return + } + if s2kType == 254 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + pk.encryptedData, err = ioutil.ReadAll(r) + if err != nil { + return + } + + if !pk.Encrypted { + return pk.parsePrivateKey(pk.encryptedData) + } + + return +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + // TODO(agl): support encrypted private keys + buf := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(buf) + if err != nil { + return + } + buf.WriteByte(0 /* no encryption */) + + privateKeyBuf := bytes.NewBuffer(nil) + + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(privateKeyBuf, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(privateKeyBuf, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(privateKeyBuf, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(privateKeyBuf, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + if err != nil { + return + } + + ptype := packetTypePrivateKey + contents := buf.Bytes() + privateKeyBytes := privateKeyBuf.Bytes() + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) + if err != nil { + return + } + _, err = w.Write(contents) + if err != nil { + return + } + _, err = w.Write(privateKeyBytes) + if err != nil { + return + } + + checksum := mod64kHash(privateKeyBytes) + var checksumBytes [2]byte + checksumBytes[0] = byte(checksum >> 8) + checksumBytes[1] = byte(checksum) + _, err = w.Write(checksumBytes[:]) + + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + err := writeBig(w, priv.D) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[1]) + if err != nil { + return err + } + err = writeBig(w, priv.Primes[0]) + if err != nil { + return err + } + return writeBig(w, priv.Precomputed.Qinv) +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + return writeBig(w, priv.X) +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) 
error { + return writeBig(w, priv.X) +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + return writeBig(w, priv.D) +} + +// Decrypt decrypts an encrypted private key using a passphrase. +func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + return pk.parsePrivateKey(data) +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + p, _, err := readMPI(buf) + if err != nil { + return + } + q, _, err := readMPI(buf) + if err != nil { + return + } + + rsaPriv.D = new(big.Int).SetBytes(d) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + priv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = priv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + + buf := bytes.NewBuffer(data) + d, _, err := readMPI(buf) + if err != nil { + return + } + + pk.PrivateKey = 
&ecdsa.PrivateKey{ + PublicKey: *ecdsaPub, + D: new(big.Int).SetBytes(d), + } + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go new file mode 100644 index 000000000..fcd5f5251 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -0,0 +1,753 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" +) + +var ( + // NIST curve P-256 + oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} + // NIST curve P-384 + oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} + // NIST curve P-521 + oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} +) + +const maxOIDLength = 8 + +// ecdsaKey stores the algorithm-specific fields for ECDSA keys. +// as defined in RFC 6637, Section 9. +type ecdsaKey struct { + // oid contains the OID byte sequence identifying the elliptic curve used + oid []byte + // p contains the elliptic curve point that represents the public key + p parsedMPI +} + +// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. +func parseOID(r io.Reader) (oid []byte, err error) { + buf := make([]byte, maxOIDLength) + if _, err = readFull(r, buf[:1]); err != nil { + return + } + oidLen := buf[0] + if int(oidLen) > len(buf) { + err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) + return + } + oid = buf[:oidLen] + _, err = readFull(r, oid) + return +} + +func (f *ecdsaKey) parse(r io.Reader) (err error) { + if f.oid, err = parseOID(r); err != nil { + return err + } + f.p.bytes, f.p.bitLength, err = readMPI(r) + return +} + +func (f *ecdsaKey) serialize(w io.Writer) (err error) { + buf := make([]byte, maxOIDLength+1) + buf[0] = byte(len(f.oid)) + copy(buf[1:], f.oid) + if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { + return + } + return writeMPIs(w, f.p) +} + +func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { + var c elliptic.Curve + if bytes.Equal(f.oid, oidCurveP256) { + c = elliptic.P256() + } else if bytes.Equal(f.oid, oidCurveP384) { + c = elliptic.P384() + } else if bytes.Equal(f.oid, oidCurveP521) { + c = elliptic.P521() + } else { + return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) + } + x, y := elliptic.Unmarshal(c, f.p.bytes) + if x == nil { + return nil, errors.UnsupportedError("failed to parse EC point") + } + return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil +} + +func (f *ecdsaKey) byteLen() int { + return 1 + len(f.oid) + 2 + len(f.p.bytes) +} + +type kdfHashFunction byte +type kdfAlgorithm byte + +// ecdhKdf stores key derivation function parameters +// used for ECDH encryption. See RFC 6637, Section 9. 
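+// Per RFC 6637 these are single-octet IDs: the hash function used by the
+// KDF and the symmetric algorithm used to wrap the session key.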
+type ecdhKdf struct { + KdfHash kdfHashFunction + KdfAlgo kdfAlgorithm +} + +func (f *ecdhKdf) parse(r io.Reader) (err error) { + buf := make([]byte, 1) + if _, err = readFull(r, buf); err != nil { + return + } + kdfLen := int(buf[0]) + if kdfLen < 3 { + return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + buf = make([]byte, kdfLen) + if _, err = readFull(r, buf); err != nil { + return + } + reserved := int(buf[0]) + f.KdfHash = kdfHashFunction(buf[1]) + f.KdfAlgo = kdfAlgorithm(buf[2]) + if reserved != 0x01 { + return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) + } + return +} + +func (f *ecdhKdf) serialize(w io.Writer) (err error) { + buf := make([]byte, 4) + // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. + buf[0] = byte(0x03) // Length of the following fields + buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now + buf[2] = byte(f.KdfHash) + buf[3] = byte(f.KdfAlgo) + _, err = w.Write(buf[:]) + return +} + +func (f *ecdhKdf) byteLen() int { + return 4 +} + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. +type PublicKey struct { + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey + Fingerprint [20]byte + KeyId uint64 + IsSubkey bool + + n, e, p, q, g, y parsedMPI + + // RFC 6637 fields + ec *ecdsaKey + ecdh *ecdhKdf +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +func fromBig(n *big.Int) parsedMPI { + return parsedMPI{ + bytes: n.Bytes(), + bitLength: uint16(n.BitLen()), + } +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: fromBig(pub.P), + q: fromBig(pub.Q), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: fromBig(pub.P), + g: fromBig(pub.G), + y: fromBig(pub.Y), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + ec: new(ecdsaKey), + } + + switch pub.Curve { + case elliptic.P256(): + pk.ec.oid = oidCurveP256 + case elliptic.P384(): + pk.ec.oid = oidCurveP384 + case elliptic.P521(): + pk.ec.oid = oidCurveP521 + default: + panic("unknown elliptic curve") + } + + pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return err + } + pk.PublicKey, err = pk.ec.newECDSA() + case PubKeyAlgoECDH: + pk.ec = new(ecdsaKey) + if err = pk.ec.parse(r); err != nil { + return + } + pk.ecdh = new(ecdhKdf) + if err = pk.ecdh.parse(r); err != nil { + return + } + // The ECDH key is stored in an ecdsa.PublicKey for convenience. + pk.PublicKey, err = pk.ec.newECDSA() + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKey) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := sha1.New() + pk.SerializeSignaturePrefix(fingerPrint) + pk.serializeWithoutHeaders(fingerPrint) + copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n.bytes, pk.n.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.e.bytes, pk.e.bitLength, err = readMPI(r) + if err != nil { + return + } + + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.bytes), + E: 0, + } + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. 
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.q.bytes, pk.q.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.bytes) + dsa.Q = new(big.Int).SetBytes(pk.q.bytes) + dsa.G = new(big.Int).SetBytes(pk.g.bytes) + dsa.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p.bytes, pk.p.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.g.bytes, pk.g.bitLength, err = readMPI(r) + if err != nil { + return + } + pk.y.bytes, pk.y.bitLength, err = readMPI(r) + if err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.bytes) + elgamal.G = new(big.Int).SetBytes(pk.g.bytes) + elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) + pk.PublicKey = elgamal + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + case PubKeyAlgoDSA: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.q.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoElGamal: + pLength += 2 + uint16(len(pk.p.bytes)) + pLength += 2 + uint16(len(pk.g.bytes)) + pLength += 2 + uint16(len(pk.y.bytes)) + case PubKeyAlgoECDSA: + pLength += uint16(pk.ec.byteLen()) + case PubKeyAlgoECDH: + pLength += uint16(pk.ec.byteLen()) + pLength += uint16(pk.ecdh.byteLen()) + default: + panic("unknown public key algorithm") + } + pLength += 6 + h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + case PubKeyAlgoDSA: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.q.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoElGamal: + length += 2 + len(pk.p.bytes) + length += 2 + len(pk.g.bytes) + length += 2 + len(pk.y.bytes) + case PubKeyAlgoECDSA: + length += pk.ec.byteLen() + case PubKeyAlgoECDH: + length += pk.ec.byteLen() + length += pk.ecdh.byteLen() + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. 
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [6]byte + buf[0] = 4 + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + buf[5] = byte(pk.PubKeyAlgo) + + _, err = w.Write(buf[:]) + if err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + case PubKeyAlgoDSA: + return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) + case PubKeyAlgoElGamal: + return writeMPIs(w, pk.p, pk.g, pk.y) + case PubKeyAlgoECDSA: + return pk.ec.serialize(w) + case PubKeyAlgoECDH: + if err = pk.ec.serialize(w); err != nil { + return + } + return pk.ecdh.serialize(w) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. 
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + case PubKeyAlgoDSA: + dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { + return errors.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + signed.SerializeSignaturePrefix(h) + signed.serializeWithoutHeaders(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. 
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// A parsedMPI is used to store the contents of a big integer, along with the +// bit length that was specified in the original input. This allows the MPI to +// be reserialized exactly. +type parsedMPI struct { + bytes []byte + bitLength uint16 +} + +// writeMPIs is a utility function for serializing several big integers to the +// given Writer. +func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { + for _, mpi := range mpis { + err = writeMPI(w, mpi.bitLength, mpi.bytes) + if err != nil { + return + } + } + return +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + case PubKeyAlgoDSA: + bitLength = pk.p.bitLength + case PubKeyAlgoElGamal: + bitLength = pk.p.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go new file mode 100644 index 000000000..5daf7b6cf --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go @@ -0,0 +1,279 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/md5" + "crypto/rsa" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" +) + +// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and +// should not be used for signing or encrypting. They are supported here only for +// parsing version 3 key material and validating signatures. +// See RFC 4880, section 5.5.2. +type PublicKeyV3 struct { + CreationTime time.Time + DaysToExpire uint16 + PubKeyAlgo PublicKeyAlgorithm + PublicKey *rsa.PublicKey + Fingerprint [16]byte + KeyId uint64 + IsSubkey bool + + n, e parsedMPI +} + +// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. +// Included here for testing purposes only. RFC 4880, section 5.5.2: +// "an implementation MUST NOT generate a V3 key, but MAY accept it." +func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { + pk := &PublicKeyV3{ + CreationTime: creationTime, + PublicKey: pub, + n: fromBig(pub.N), + e: fromBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerPrintAndKeyId() + return pk +} + +func (pk *PublicKeyV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [8]byte + if _, err = readFull(r, buf[:]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + return errors.UnsupportedError("public key version") + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerPrintAndKeyId() + return +} + +func (pk *PublicKeyV3) setFingerPrintAndKeyId() { + // RFC 4880, section 12.2 + fingerPrint := md5.New() + fingerPrint.Write(pk.n.bytes) + fingerPrint.Write(pk.e.bytes) + fingerPrint.Sum(pk.Fingerprint[:0]) + pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { + if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { + return + } + if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { + return + } + + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } + if len(pk.e.bytes) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} + for i := 0; i < len(pk.e.bytes); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.bytes[i]) + } + pk.PublicKey = rsa + return +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. 
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { + var pLength uint16 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + pLength += 2 + uint16(len(pk.n.bytes)) + pLength += 2 + uint16(len(pk.e.bytes)) + default: + panic("unknown public key algorithm") + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) + return +} + +func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { + length := 8 // 8 byte header + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += 2 + len(pk.n.bytes) + length += 2 + len(pk.e.bytes) + default: + panic("unknown public key algorithm") + } + + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + if err = serializeHeader(w, packetType, length); err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { + var buf [8]byte + // Version 3 + buf[0] = 3 + // Creation time + t := uint32(pk.CreationTime.Unix()) + buf[1] = byte(t >> 24) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 8) + buf[4] = byte(t) + // Days to expire + buf[5] = byte(pk.DaysToExpire >> 8) + buf[6] = byte(pk.DaysToExpire) + // Public key algorithm + buf[7] = byte(pk.PubKeyAlgo) + + if _, err = w.Write(buf[:]); err != nil { + return + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + return writeMPIs(w, pk.n, pk.e) + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures +func (pk *PublicKeyV3) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly +} + +// VerifySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + + suffix := make([]byte, 5) + suffix[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) + signed.Write(suffix) + hashBytes := signed.Sum(nil) + + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + return errors.SignatureError("RSA verification failure") + } + return + default: + // V3 public keys only support RSA. + panic("shouldn't happen") + } +} + +// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := userIdSignatureV3Hash(id, pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this +// public key, of signed. 
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignatureV3(h, sig) +} + +// userIdSignatureV3Hash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { + if !hfn.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hfn.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + h.Write([]byte(id)) + + return +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKeyV3) KeyIdString() string { + return fmt.Sprintf("%X", pk.KeyId) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKeyV3) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) +} + +// BitLength returns the bit length for the given public key. +func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.bitLength + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go new file mode 100644 index 000000000..34bc7c613 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -0,0 +1,76 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "golang.org/x/crypto/openpgp/errors" + "io" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. 
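Next, Push and Unread together give the usual packet-walking loop. A minimal caller-side sketch (the function name is illustrative):

package example

import (
	"fmt"
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// listPackets prints the concrete type of every packet in an OpenPGP stream,
// descending into compressed members via Push. Unknown packet types are
// already skipped inside Next, and io.EOF marks the end of the stream.
func listPackets(r io.Reader) error {
	pr := packet.NewReader(r)
	for {
		p, err := pr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch pkt := p.(type) {
		case *packet.Compressed:
			// Push enforces the maxReaders recursion limit described above.
			if err := pr.Push(pkt.Body); err != nil {
				return err
			}
		default:
			fmt.Printf("%T\n", pkt)
		}
	}
}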
+func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go new file mode 100644 index 000000000..b2a24a532 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature.go @@ -0,0 +1,731 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + CreationTime time.Time + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI + ECDSASigR, ECDSASigS parsedMPI + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + IssuerKeyId *uint64 + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. + RevocationReason *uint8 + RevocationReasonText string + + // MDC is set if this signature has a feature packet that indicates + // support for MDC subpackets. + MDC bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. 
+ EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + l := 6 + hashedSubpacketsLength + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + copy(sig.HashSuffix[1:], buf[:5]) + hashedSubpackets := sig.HashSuffix[6:l] + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + // See RFC 4880, section 5.2.4 + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = uint8(l >> 24) + trailer[3] = uint8(l >> 16) + trailer[4] = uint8(l >> 8) + trailer[5] = uint8(l) + + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + } + case PubKeyAlgoECDSA: + sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) + if err == nil { + sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1. 
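Before the subpacket walker that follows, it helps to see the exact layout parse assembles into sig.HashSuffix above, which signPrepareHash later feeds to the hash. A hedged reconstruction (all field values are placeholders):

package example

// v4HashSuffix rebuilds the byte layout stored in Signature.HashSuffix:
// version, signature type, public-key algorithm, hash ID, the two-byte
// hashed-subpacket length, the hashed subpackets themselves, and the
// six-octet trailer 0x04 0xFF <32-bit big-endian length of the first part>.
func v4HashSuffix(sigType, pubKeyAlgo, hashId byte, hashedSubpackets []byte) []byte {
	l := 6 + len(hashedSubpackets)
	out := make([]byte, 0, l+6)
	out = append(out, 4, sigType, pubKeyAlgo, hashId,
		byte(len(hashedSubpackets)>>8), byte(len(hashedSubpackets)))
	out = append(out, hashedSubpackets...)
	out = append(out, 4, 0xff, byte(l>>24), byte(l>>16), byte(l>>8), byte(l))
	return out
}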
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + // Issuer, section 5.2.3.5 + if 
len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. In practice, the subpacket is used exclusively to + // indicate support for MDC-protected encryption. + sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. 
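(The encoder body follows directly below.) As a cross-check, here is the matching decoder, restating the length rule already used by parseSignatureSubpacket above: lengths 0-191 take one octet, 192-16319 take two, and anything larger uses the five-octet form introduced by 0xFF.

package example

// decodeSubpacketLength is the inverse of serializeSubpacketLength below and
// mirrors the switch at the top of parseSignatureSubpacket. It returns the
// decoded length and the number of length octets consumed. b is assumed to
// hold at least the full encoded length.
func decodeSubpacketLength(b []byte) (length uint32, consumed int) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), 1
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2
	default:
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5
	}
}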
+ if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired. +func (sig *Signature) KeyExpired(currentTime time.Time) bool { + if sig.KeyLifetimeSecs == nil { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. +func (sig *Signature) buildHashSuffix() (err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + + var ok bool + l := 6 + hashedSubpacketsLen + sig.HashSuffix = make([]byte, l+6) + sig.HashSuffix[0] = 4 + sig.HashSuffix[1] = uint8(sig.SigType) + sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) + sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) + sig.HashSuffix[5] = byte(hashedSubpacketsLen) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + trailer := sig.HashSuffix[l:] + trailer[0] = 4 + trailer[1] = 0xff + trailer[2] = byte(l >> 24) + trailer[3] = byte(l >> 16) + trailer[4] = byte(l >> 8) + trailer[5] = byte(l) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + err = sig.buildHashSuffix() + if err != nil { + return + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. 
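A hedged sketch of the calling sequence the implementation below expects, producing a detached binary signature. The helper name is illustrative, and priv is assumed to be an already-decrypted *packet.PrivateKey obtained elsewhere (for example from a parsed key ring):

package example

import (
	"crypto"
	"crypto/sha256"
	"io"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// signDetached fills in the Signature fields, hashes the message itself, and
// then calls Sign followed by Serialize. A nil config selects the package
// defaults, as the comment above notes.
func signDetached(w io.Writer, priv *packet.PrivateKey, message io.Reader) error {
	sig := &packet.Signature{
		SigType:      packet.SigTypeBinary,
		PubKeyAlgo:   priv.PubKeyAlgo,
		Hash:         crypto.SHA256,
		CreationTime: time.Now(),
		IssuerKeyId:  &priv.KeyId,
	}
	h := sha256.New()
	if _, err := io.Copy(h, message); err != nil {
		return err
	}
	if err := sig.Sign(h, priv, nil); err != nil {
		return err
	}
	return sig.Serialize(w)
}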
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + sig.outSubpackets = sig.buildSubpackets() + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR.bytes = r.Bytes() + sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) + sig.DSASigS.bytes = s.Bytes() + sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = fromBig(r) + sig.ECDSASigS = fromBig(s) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
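Serialize emits the raw binary packet; callers who want ASCII output usually wrap w in an armor block first. A hedged sketch building on the detached-signature example above (armor.Encode is assumed to come from golang.org/x/crypto/openpgp/armor with its documented signature; "PGP SIGNATURE" is the block type read.go expects):

package example

import (
	"io"

	"golang.org/x/crypto/openpgp/armor"
	"golang.org/x/crypto/openpgp/packet"
)

// serializeArmored wraps Serialize in an ASCII armor block of type
// "PGP SIGNATURE", which is what most tools expect for detached signatures.
func serializeArmored(w io.Writer, sig *packet.Signature) error {
	armored, err := armor.Encode(w, "PGP SIGNATURE", nil)
	if err != nil {
		return err
	}
	if err := sig.Serialize(armored); err != nil {
		armored.Close()
		return err
	}
	return armored.Close()
}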
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = 2 + len(sig.RSASignature.bytes) + case PubKeyAlgoDSA: + sigLength = 2 + len(sig.DSASigR.bytes) + sigLength += 2 + len(sig.DSASigS.bytes) + case PubKeyAlgoECDSA: + sigLength = 2 + len(sig.ECDSASigR.bytes) + sigLength += 2 + len(sig.ECDSASigS.bytes) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + + _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) + if err != nil { + return + } + + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + case PubKeyAlgoECDSA: + err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. + subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) + } + + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. 
+ + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go new file mode 100644 index 000000000..6edff8893 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go @@ -0,0 +1,146 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// SignatureV3 represents older version 3 signatures. These signatures are less secure +// than version 4 and should not be used to create new signatures. They are included +// here for backwards compatibility to read and validate with older key material. +// See RFC 4880, section 5.2.2. +type SignatureV3 struct { + SigType SignatureType + CreationTime time.Time + IssuerKeyId uint64 + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + HashTag [2]byte + + RSASignature parsedMPI + DSASigR, DSASigS parsedMPI +} + +func (sig *SignatureV3) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.2 + var buf [8]byte + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] < 2 || buf[0] > 3 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + if _, err = readFull(r, buf[:1]); err != nil { + return + } + if buf[0] != 5 { + err = errors.UnsupportedError( + "invalid hashed material length " + strconv.Itoa(int(buf[0]))) + return + } + + // Read hashed material: signature type + creation time + if _, err = readFull(r, buf[:5]); err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + t := binary.BigEndian.Uint32(buf[1:5]) + sig.CreationTime = time.Unix(int64(t), 0) + + // Eight-octet Key ID of signer. 
+ if _, err = readFull(r, buf[:8]); err != nil { + return + } + sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) + + // Public-key and hash algorithm + if _, err = readFull(r, buf[:2]); err != nil { + return + } + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + var ok bool + if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + // Two-octet field holding left 16 bits of signed hash value. + if _, err = readFull(r, sig.HashTag[:2]); err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) + case PubKeyAlgoDSA: + if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { + return + } + sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) + default: + panic("unreachable") + } + return +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. +func (sig *SignatureV3) Serialize(w io.Writer) (err error) { + buf := make([]byte, 8) + + // Write the sig type and creation time + buf[0] = byte(sig.SigType) + binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + // Write the issuer long key ID + binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) + if _, err = w.Write(buf[:8]); err != nil { + return + } + + // Write public key algorithm, hash ID, and hash value + buf[0] = byte(sig.PubKeyAlgo) + hashId, ok := s2k.HashToHashId(sig.Hash) + if !ok { + return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) + } + buf[1] = hashId + copy(buf[2:4], sig.HashTag[:]) + if _, err = w.Write(buf[:4]); err != nil { + return + } + + if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPIs(w, sig.RSASignature) + case PubKeyAlgoDSA: + err = writeMPIs(w, sig.DSASigR, sig.DSASigS) + default: + panic("impossible") + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 000000000..744c2d2c4 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has even been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. 
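A hedged sketch of how this packet combines with the symmetrically encrypted data packet defined later in this package: SerializeSymmetricKeyEncrypted (below) writes the passphrase-protected session key and returns it, and SerializeSymmetricallyEncrypted then encrypts the payload with that key. The function name is illustrative, and in a real message the inner plaintext would itself be OpenPGP packets (for example a literal data packet) rather than raw bytes.

package example

import (
	"io"

	"golang.org/x/crypto/openpgp/packet"
)

// encryptWithPassphrase writes a symmetric-key encrypted session key packet,
// then an integrity-protected data packet encrypted with the returned
// session key. A zero-value or nil config falls back to the package defaults.
func encryptWithPassphrase(w io.Writer, passphrase, plaintext []byte) error {
	config := &packet.Config{}
	key, err := packet.SerializeSymmetricKeyEncrypted(w, passphrase, config)
	if err != nil {
		return err
	}
	body, err := packet.SerializeSymmetricallyEncrypted(w, config.Cipher(), key, config)
	if err != nil {
		return err
	}
	if _, err := body.Write(plaintext); err != nil {
		body.Close()
		return err
	}
	return body.Close()
}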
+type SymmetricKeyEncrypted struct { + CipherFunc CipherFunction + s2k func(out, in []byte) + encryptedKey []byte +} + +const symmetricKeyEncryptedVersion = 4 + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3. + var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + if buf[0] != symmetricKeyEncryptedVersion { + return errors.UnsupportedError("SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + var err error + ske.s2k, err = s2k.Parse(r) + if err != nil { + return err + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + + "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") + } + return plaintextKey, cipherFunc, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The +// packet contains a random session key, encrypted by a key derived from the +// given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + var buf [2]byte + buf[0] = symmetricKeyEncryptedVersion + buf[1] = byte(cipherFunc) + _, err = w.Write(buf[:]) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + + key = sessionKey + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 000000000..6126030eb --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" + "hash" + "io" + "strconv" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted contents of the +// packet can be read. An incorrect key can, with high probability, be detected +// immediately and this will result in a KeyIncorrect error being returned. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. 
+ ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + if s == nil { + return nil, errors.ErrKeyIncorrect + } + + plaintext := cipher.StreamReader{S: s, R: se.contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. +const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.SignatureError("error during reading") + } + + for !ser.eof { + // We haven't seen EOF so we need to read to the end + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.SignatureError("error during reading") + } + } + + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.SignatureError("MDC packet not found") + } + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.SignatureError("hash mismatch") + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintains a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. 
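That trailing MDC packet is the same 22-byte trailer the reader's Close method above checks. For clarity, a hedged restatement of just the trailer construction (h is assumed to already contain the OCFB prefix and the plaintext, exactly as the reader and writer maintain it):

package example

import (
	"crypto/sha1"
	"hash"
)

// mdcTrailer restates what seMDCWriter.Close writes: a new-format tag byte
// for packet type 19 (0xD3), a length byte of sha1.Size, and the SHA-1 of
// everything hashed so far including those two bytes.
func mdcTrailer(h hash.Hash) []byte {
	trailer := make([]byte, 2, 2+sha1.Size)
	trailer[0] = 0x80 | 0x40 | 19 // mdcPacketTagByte above
	trailer[1] = sha1.Size
	h.Write(trailer)
	return h.Sum(trailer) // appends the 20-byte digest after the two header bytes
}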
+type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go new file mode 100644 index 000000000..d19ffbc78 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
+ 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.13 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + sp.Serialize(&buf) + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go new file mode 100644 index 000000000..d6bea7d4a --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) " +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. + + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>' +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. 
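A quick illustration of the resulting convention (the implementation follows; the names and address here are made up):

package main

import (
	"fmt"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// The three parts are joined as "Name (Comment) <email>".
	uid := packet.NewUserId("Alice Example", "test key", "alice@example.com")
	fmt.Println(uid.Id) // Alice Example (test key) <alice@example.com>

	// A metacharacter in any field makes NewUserId return nil, mirroring GnuPG.
	fmt.Println(packet.NewUserId("Bad (Name", "", "") == nil) // true
}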
+ + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) ". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go new file mode 100644 index 000000000..6ec664f44 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/read.go @@ -0,0 +1,442 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages. +package openpgp // import "golang.org/x/crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. 
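From the caller's side, the key property of this struct (defined below) is that UnverifiedBody must be read to EOF before SignatureError or the integrity check means anything. A hedged usage sketch; the helper name is illustrative and keyring is assumed to already hold the recipient's decrypted private key plus the signer's public key:

package example

import (
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp"
)

// readAndVerify decrypts and, if applicable, verifies an OpenPGP message,
// returning the plaintext only after the body has been fully consumed.
func readAndVerify(keyring openpgp.KeyRing, msg io.Reader) ([]byte, error) {
	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	// Reading to EOF drives both signature verification and the MDC check.
	body, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, md.SignatureError
	}
	return body, nil
}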
+type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + SignatureError error // nil if the signature is good. + Signature *packet.Signature // the signature packet itself, if v4 (default) + SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. +func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + var se *packet.SymmetricallyEncrypted + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. 
+ md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted: + se = p + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. +FindKey: + for { + // See if any of the keys already have a private key available + candidates = candidates[:0] + candidateFingerprints := make(map[string]bool) + + for _, pk := range pubKeys { + if pk.key.PrivateKey == nil { + continue + } + if !pk.key.PrivateKey.Encrypted { + if len(pk.encryptedKey.Key) == 0 { + pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) + } + if len(pk.encryptedKey.Key) == 0 { + continue + } + decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + md.DecryptedWith = pk.key + break FindKey + } + } else { + fpr := string(pk.key.PublicKey.Fingerprint[:]) + if v := candidateFingerprints[fpr]; v { + continue + } + candidates = append(candidates, pk.key) + candidateFingerprints[fpr] = true + } + } + + if len(candidates) == 0 && len(symKeys) == 0 { + return nil, errors.ErrKeyIncorrect + } + + if prompt == nil { + return nil, errors.ErrKeyIncorrect + } + + passphrase, err := prompt(candidates, len(symKeys) != 0) + if err != nil { + return nil, err + } + + // Try the symmetric passphrase first + if len(symKeys) != 0 && passphrase != nil { + for _, s := range symKeys { + key, cipherFunc, err := s.Decrypt(passphrase) + if err == nil { + decrypted, err = se.Decrypt(cipherFunc, key) + if err != nil && err != errors.ErrKeyIncorrect { + return nil, err + } + if decrypted != nil { + break FindKey + } + } + + } + } + } + + md.decrypted = decrypted + if err := packets.Push(decrypted); err != nil { + return nil, err + } + return readSignedMessage(packets, md, keyring) +} + +// readSignedMessage reads a possibly signed message if mdin is non-zero then +// that structure is updated and returned. Otherwise a fresh MessageDetails is +// used. 
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if !p.IsLast { + return nil, errors.UnsupportedError("nested signatures") + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md = nil + return + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.SignedBy != nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (n int, err error) { + n, err = cr.md.LiteralData.Body.Read(buf) + if err == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + return +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
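Because the signature and any MDC are only checked once UnverifiedBody has been drained, a caller normally reads the body to EOF before looking at SignatureError. A minimal sketch, assuming a hypothetical helper readAndVerify:

package openpgputil

import (
	"fmt"
	"io"
	"io/ioutil"

	"golang.org/x/crypto/openpgp"
)

// readAndVerify decrypts and/or verifies msg against keyring and returns the
// literal data. SignatureError is meaningful only after UnverifiedBody has
// returned EOF, so the body is read in full first.
func readAndVerify(keyring openpgp.KeyRing, msg io.Reader, prompt openpgp.PromptFunction) ([]byte, error) {
	md, err := openpgp.ReadMessage(msg, keyring, prompt, nil)
	if err != nil {
		return nil, err
	}
	body, err := ioutil.ReadAll(md.UnverifiedBody) // consume to EOF
	if err != nil {
		return nil, err
	}
	if md.IsSigned {
		if md.SignedBy == nil {
			return nil, fmt.Errorf("signed by unknown key %X", md.SignedByKeyId)
		}
		if md.SignatureError != nil {
			return nil, md.SignatureError
		}
	}
	return body, nil
}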
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails +} + +func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { + n, err = scr.md.LiteralData.Body.Read(buf) + scr.wrappedHash.Write(buf[:n]) + if err == io.EOF { + var p packet.Packet + p, scr.md.SignatureError = scr.packets.Next() + if scr.md.SignatureError != nil { + return + } + + var ok bool + if scr.md.Signature, ok = p.(*packet.Signature); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) + } else { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") + return + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + err = mdcErr + } + } + } + return +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + switch sig := p.(type) { + case *packet.Signature: + err = key.PublicKey.VerifySignature(h, sig) + case *packet.SignatureV3: + err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") + } + + if err == nil { + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. +func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body) +} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go new file mode 100644 index 000000000..4b9a44ca2 --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2k implements the various OpenPGP string-to-key transforms as +// specified in RFC 4800 section 3.7.1. +package s2k // import "golang.org/x/crypto/openpgp/s2k" + +import ( + "crypto" + "hash" + "io" + "strconv" + + "golang.org/x/crypto/openpgp/errors" +) + +// Config collects configuration parameters for s2k key-stretching +// transformatioms. A nil *Config is valid and results in all default +// values. Currently, Config is used only by the Serialize function in +// this package. +type Config struct { + // Hash is the default hash function to be used. If + // nil, SHA1 is used. + Hash crypto.Hash + // S2KCount is only used for symmetric encryption. It + // determines the strength of the passphrase stretching when + // the said passphrase is hashed to produce a key. S2KCount + // should be between 1024 and 65011712, inclusive. If Config + // is nil or S2KCount is 0, the value 65536 used. Not all + // values in the above range can be represented. S2KCount will + // be rounded up to the next representable value if it cannot + // be encoded exactly. When set, it is strongly encrouraged to + // use a value that is at least 65536. See RFC 4880 Section + // 3.7.1.3. + S2KCount int +} + +func (c *Config) hash() crypto.Hash { + if c == nil || uint(c.Hash) == 0 { + // SHA1 is the historical default in this package. + return crypto.SHA1 + } + + return c.Hash +} + +func (c *Config) encodedCount() uint8 { + if c == nil || c.S2KCount == 0 { + return 96 // The common case. Correspoding to 65536 + } + + i := c.S2KCount + switch { + // Behave like GPG. Should we make 65536 the lowest value used? + case i < 1024: + i = 1024 + case i > 65011712: + i = 65011712 + } + + return encodeCount(i) +} + +// encodeCount converts an iterative "count" in the range 1024 to +// 65011712, inclusive, to an encoded count. The return value is the +// octet that is actually stored in the GPG file. encodeCount panics +// if i is not in the above range (encodedCount above takes care to +// pass i in the correct range). See RFC 4880 Section 3.7.7.1. +func encodeCount(i int) uint8 { + if i < 1024 || i > 65011712 { + panic("count arg i outside the required range") + } + + for encoded := 0; encoded < 256; encoded++ { + count := decodeCount(uint8(encoded)) + if count >= i { + return uint8(encoded) + } + } + + return 255 +} + +// decodeCount returns the s2k mode 3 iterative "count" corresponding to +// the encoded octet c. +func decodeCount(c uint8) int { + return (16 + int(c&15)) << (uint32(c>>4) + 6) +} + +// Simple writes to out the result of computing the Simple S2K function (RFC +// 4880, section 3.7.1.1) using the given hash and input passphrase. +func Simple(out []byte, h hash.Hash, in []byte) { + Salted(out, h, in, nil) +} + +var zero [1]byte + +// Salted writes to out the result of computing the Salted S2K function (RFC +// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. +func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { + done := 0 + var digest []byte + + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + h.Write(salt) + h.Write(in) + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Iterated writes to out the result of computing the Iterated and Salted S2K +// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, +// salt and iteration count. 
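As a worked example of the count encoding defined above (decodeCount and encodeCount are unexported; the copy below exists only to show the arithmetic): the default octet 96 (0x60) decodes to (16+0) << (6+6) = 65536, and the maximum octet 255 decodes to (16+15) << (15+6) = 65011712.

// decodeCountExample mirrors the unexported decodeCount above: the low
// nibble selects a multiplier in 16..31 and the high nibble selects the
// power of two it is shifted by.
func decodeCountExample(c uint8) int {
	return (16 + int(c&15)) << (uint32(c>>4) + 6)
}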
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { + combined := make([]byte, len(in)+len(salt)) + copy(combined, salt) + copy(combined[len(salt):], in) + + if count < len(combined) { + count = len(combined) + } + + done := 0 + var digest []byte + for i := 0; done < len(out); i++ { + h.Reset() + for j := 0; j < i; j++ { + h.Write(zero[:]) + } + written := 0 + for written < count { + if written+len(combined) > count { + todo := count - written + h.Write(combined[:todo]) + written = count + } else { + h.Write(combined) + written += len(combined) + } + } + digest = h.Sum(digest[:0]) + n := copy(out[done:], digest) + done += n + } +} + +// Parse reads a binary specification for a string-to-key transformation from r +// and returns a function which performs that transform. +func Parse(r io.Reader) (f func(out, in []byte), err error) { + var buf [9]byte + + _, err = io.ReadFull(r, buf[:2]) + if err != nil { + return + } + + hash, ok := HashIdToHash(buf[1]) + if !ok { + return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) + } + if !hash.Available() { + return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) + } + h := hash.New() + + switch buf[0] { + case 0: + f := func(out, in []byte) { + Simple(out, h, in) + } + return f, nil + case 1: + _, err = io.ReadFull(r, buf[:8]) + if err != nil { + return + } + f := func(out, in []byte) { + Salted(out, h, in, buf[:8]) + } + return f, nil + case 3: + _, err = io.ReadFull(r, buf[:9]) + if err != nil { + return + } + count := decodeCount(buf[8]) + f := func(out, in []byte) { + Iterated(out, h, in, buf[:8], count) + } + return f, nil + } + + return nil, errors.UnsupportedError("S2K function") +} + +// Serialize salts and stretches the given passphrase and writes the +// resulting key into key. It also serializes an S2K descriptor to +// w. The key stretching can be configured with c, which may be +// nil. In that case, sensible defaults will be used. +func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { + var buf [11]byte + buf[0] = 3 /* iterated and salted */ + buf[1], _ = HashToHashId(c.hash()) + salt := buf[2:10] + if _, err := io.ReadFull(rand, salt); err != nil { + return err + } + encodedCount := c.encodedCount() + count := decodeCount(encodedCount) + buf[10] = encodedCount + if _, err := w.Write(buf[:]); err != nil { + return err + } + + Iterated(key, c.hash().New(), passphrase, salt, count) + return nil +} + +// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +var hashToHashIdMapping = []struct { + id byte + hash crypto.Hash + name string +}{ + {1, crypto.MD5, "MD5"}, + {2, crypto.SHA1, "SHA1"}, + {3, crypto.RIPEMD160, "RIPEMD160"}, + {8, crypto.SHA256, "SHA256"}, + {9, crypto.SHA384, "SHA384"}, + {10, crypto.SHA512, "SHA512"}, + {11, crypto.SHA224, "SHA224"}, +} + +// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP +// hash id. +func HashIdToHash(id byte) (h crypto.Hash, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.hash, true + } + } + return 0, false +} + +// HashIdToString returns the name of the hash function corresponding to the +// given OpenPGP hash id. 
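Serialize and Parse above are designed to round-trip: the descriptor written by Serialize can be re-read by Parse to derive the same key again. A sketch under assumed parameter choices (SHA-256, count 65536); the helper name roundTrip is illustrative.

package s2kutil

import (
	"bytes"
	"crypto"
	"crypto/rand"
	_ "crypto/sha256" // register SHA-256 so it is Available()

	"golang.org/x/crypto/openpgp/s2k"
)

// roundTrip derives a 32-byte key from passphrase while writing the S2K
// descriptor to a buffer, then re-parses that descriptor and derives the key
// a second time; derived and rederived should be identical.
func roundTrip(passphrase []byte) (derived, rederived [32]byte, err error) {
	var desc bytes.Buffer
	cfg := &s2k.Config{Hash: crypto.SHA256, S2KCount: 65536}
	if err = s2k.Serialize(&desc, derived[:], rand.Reader, passphrase, cfg); err != nil {
		return
	}
	f, err := s2k.Parse(&desc) // reads back the descriptor just written
	if err != nil {
		return
	}
	f(rederived[:], passphrase)
	return
}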
+func HashIdToString(id byte) (name string, ok bool) { + for _, m := range hashToHashIdMapping { + if m.id == id { + return m.name, true + } + } + + return "", false +} + +// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. +func HashToHashId(h crypto.Hash) (id byte, ok bool) { + for _, m := range hashToHashIdMapping { + if m.hash == h { + return m.id, true + } + } + return 0, false +} diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go new file mode 100644 index 000000000..4ee71784e --- /dev/null +++ b/vendor/golang.org/x/crypto/openpgp/write.go @@ -0,0 +1,418 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/errors" + "golang.org/x/crypto/openpgp/packet" + "golang.org/x/crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. 
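Taken together with CheckDetachedSignature earlier in this diff, the detach-signing entry points can be exercised as in the sketch below; the helper name and the use of EntityList as the keyring are assumptions.

package openpgputil

import (
	"bytes"
	"strings"

	"golang.org/x/crypto/openpgp"
)

// signAndVerifyDetached creates a binary detached signature over message with
// signer (whose private key must already be decrypted) and then verifies it,
// using the signer itself as the keyring.
func signAndVerifyDetached(signer *openpgp.Entity, message string) error {
	var sig bytes.Buffer
	if err := openpgp.DetachSign(&sig, signer, strings.NewReader(message), nil); err != nil {
		return err
	}
	_, err := openpgp.CheckDetachedSignature(
		openpgp.EntityList{signer}, strings.NewReader(message), &sig)
	return err
}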
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sig.IssuerKeyId = &signer.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + io.Copy(wrappedHash, message) + + err = sig.Sign(h, signer.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + + literaldata := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literaldata, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. 
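SymmetricallyEncrypt above returns a WriteCloser that must be closed once the plaintext has been written; when combined with armoring, the inner writer has to be closed before the outer one. A sketch, with the armor block type and helper name as assumptions:

package openpgputil

import (
	"io"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

// symmetricEncryptTo writes an armored, passphrase-encrypted copy of src to w.
// Closing plaintext flushes the encrypted packets; closing armored writes the
// trailing armor lines, so both Closes are required, innermost first.
func symmetricEncryptTo(w io.Writer, passphrase []byte, src io.Reader) error {
	armored, err := armor.Encode(w, "PGP MESSAGE", nil)
	if err != nil {
		return err
	}
	plaintext, err := openpgp.SymmetricallyEncrypt(armored, passphrase, nil, nil)
	if err != nil {
		return err
	}
	if _, err := io.Copy(plaintext, src); err != nil {
		return err
	}
	if err := plaintext.Close(); err != nil {
		return err
	}
	return armored.Close()
}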
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.signingKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: packet.SigTypeBinary, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil + } + return literalData, nil +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
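The Encrypt entry point documented above likewise hands back a WriteCloser, and its final Close is what flushes the literal data, trailing signature and MDC, so it cannot be skipped. A sketch with an assumed helper name; pass signer as nil for encrypt-only output.

package openpgputil

import (
	"io"

	"golang.org/x/crypto/openpgp"
)

// encryptTo encrypts src to the given recipients and optionally signs it with
// signer (nil to skip signing). The Close on the returned WriteCloser must not
// be omitted, or the message is left incomplete.
func encryptTo(ciphertext io.Writer, recipients []*openpgp.Entity, signer *openpgp.Entity, src io.Reader) error {
	plaintext, err := openpgp.Encrypt(ciphertext, recipients, signer, nil, nil)
	if err != nil {
		return err
	}
	if _, err := io.Copy(plaintext, src); err != nil {
		return err
	}
	return plaintext.Close()
}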
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] + defaultHashes := candidateHashes[len(candidateHashes)-1:] + + encryptKeys := make([]Key, len(to)) + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].encryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].primaryIdentity().SelfSignature + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) + if err != nil { + return + } + + return writeAndSign(payload, candidateHashes, signed, hints, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[len(candidateHashes)-1:] + preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + h hash.Hash + signer *packet.PrivateKey + config *packet.Config +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.h.Write(data) + return s.literalData.Write(data) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + SigType: packet.SigTypeBinary, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go new file mode 100644 index 000000000..a8dd589ae --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!ppc64le gccgo appengine + +package poly1305 + +type mac struct{ macGeneric } + +func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go new file mode 100644 index 000000000..d076a5623 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -0,0 +1,83 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package poly1305 implements Poly1305 one-time message authentication code as +// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. +// +// Poly1305 is a fast, one-time authentication function. It is infeasible for an +// attacker to generate an authenticator for a message without the key. However, a +// key must only be used for a single message. Authenticating two different +// messages with the same key allows an attacker to forge authenticators for other +// messages with the same key. +// +// Poly1305 was originally coupled with AES in order to make Poly1305-AES. 
AES was +// used with a fixed key in order to generate one-time keys from an nonce. +// However, in this package AES isn't used and the one-time key is specified +// directly. +package poly1305 // import "golang.org/x/crypto/poly1305" + +import "crypto/subtle" + +// TagSize is the size, in bytes, of a poly1305 authenticator. +const TagSize = 16 + +// Verify returns true if mac is a valid authenticator for m with the given +// key. +func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} + +// New returns a new MAC computing an authentication +// tag of all data written to it with the given key. +// This allows writing the message progressively instead +// of passing it as a single slice. Common users should use +// the Sum function instead. +// +// The key must be unique for each message, as authenticating +// two different messages with the same key allows an attacker +// to forge messages at will. +func New(key *[32]byte) *MAC { + return &MAC{ + mac: newMAC(key), + finalized: false, + } +} + +// MAC is an io.Writer computing an authentication tag +// of the data written to it. +// +// MAC cannot be used like common hash.Hash implementations, +// because using a poly1305 key twice breaks its security. +// Therefore writing data to a running MAC after calling +// Sum causes it to panic. +type MAC struct { + mac // platform-dependent implementation + + finalized bool +} + +// Size returns the number of bytes Sum will return. +func (h *MAC) Size() int { return TagSize } + +// Write adds more data to the running message authentication code. +// It never returns an error. +// +// It must not be called after the first call of Sum. +func (h *MAC) Write(p []byte) (n int, err error) { + if h.finalized { + panic("poly1305: write to MAC after Sum") + } + return h.mac.Write(p) +} + +// Sum computes the authenticator of all data written to the +// message authentication code. +func (h *MAC) Sum(b []byte) []byte { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return append(b, mac[:]...) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 000000000..2dbf42aa5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package poly1305 + +//go:noescape +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
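The package-level API above supports both one-shot and streaming use; the sketch below exercises Sum, Verify and the MAC type with a random one-time key (the helper name is an assumption). The same key is only ever applied to a single message.

package poly1305util

import (
	"crypto/rand"

	"golang.org/x/crypto/poly1305"
)

// tagAndVerify authenticates msg with a fresh one-time key, once via the
// one-shot Sum and once via the streaming MAC, and checks the tag.
func tagAndVerify(msg []byte) (bool, error) {
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		return false, err
	}

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key) // one-shot

	mac := poly1305.New(&key) // streaming, over the same single message
	mac.Write(msg)
	streamed := mac.Sum(nil)

	return poly1305.Verify(&tag, msg, &key) && len(streamed) == poly1305.TagSize, nil
}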
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 000000000..7d600f13c --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,148 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) +TEXT ·update(SB), $0-32 + MOVQ state+0(FP), DI + MOVQ msg_base+8(FP), SI + MOVQ msg_len+16(FP), R15 + + MOVQ 0(DI), R8 // h0 + MOVQ 8(DI), R9 // h1 + MOVQ 16(DI), R10 // h2 + MOVQ 24(DI), R11 // r0 + MOVQ 32(DI), R12 // r1 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, 0(DI) + MOVQ R9, 8(DI) + MOVQ R10, 16(DI) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVQ state+0(FP), DI + MOVQ key+8(FP), SI + + // state[0...7] is initialized with zero + MOVOU 0(SI), X0 + MOVOU 16(SI), X1 + MOVOU ·poly1305Mask<>(SB), X2 + PAND X2, X0 + MOVOU X0, 24(DI) + MOVOU X1, 40(DI) 
+ RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVQ tag+0(FP), DI + MOVQ state+8(FP), SI + + MOVQ 0(SI), AX + MOVQ 8(SI), BX + MOVQ 16(SI), CX + MOVQ AX, R8 + MOVQ BX, R9 + SUBQ $0xFFFFFFFFFFFFFFFB, AX + SBBQ $0xFFFFFFFFFFFFFFFF, BX + SBBQ $3, CX + CMOVQCS R8, AX + CMOVQCS R9, BX + ADDQ 40(SI), AX + ADCQ 48(SI), BX + + MOVQ AX, 0(DI) + MOVQ BX, 8(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go new file mode 100644 index 000000000..5dc321c2f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +package poly1305 + +// This function is implemented in sum_arm.s +//go:noescape +func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s new file mode 100644 index 000000000..f70b4ac48 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.s @@ -0,0 +1,427 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +#include "textflag.h" + +// This code was translated into a form compatible with 5a from the public +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. + +DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff +DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 +DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff +DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff +DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff +GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 + +// Warning: the linker may use R11 to synthesize certain instructions. Please +// take care and verify that no synthetic instructions use it. + +TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 + // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It + // might look like it's only 60 bytes of space but the final four bytes + // will be written by another function.) We need to skip over four + // bytes of stack because that's saving the value of 'g'. 
+ ADD $4, R13, R8 + MOVM.IB [R4-R7], (R8) + MOVM.IA.W (R1), [R2-R5] + MOVW $·poly1305_init_constants_armv6<>(SB), R7 + MOVW R2, R8 + MOVW R2>>26, R9 + MOVW R3>>20, g + MOVW R4>>14, R11 + MOVW R5>>8, R12 + ORR R3<<6, R9, R9 + ORR R4<<12, g, g + ORR R5<<18, R11, R11 + MOVM.IA (R7), [R2-R6] + AND R8, R2, R2 + AND R9, R3, R3 + AND g, R4, R4 + AND R11, R5, R5 + AND R12, R6, R6 + MOVM.IA.W [R2-R6], (R0) + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + MOVM.IA.W [R2-R6], (R0) + MOVM.IA.W (R1), [R2-R5] + MOVM.IA [R2-R6], (R0) + ADD $20, R13, R0 + MOVM.DA (R0), [R4-R7] + RET + +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ + MOVBU (offset+0)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+0)(Rdst); \ + MOVBU (offset+1)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+1)(Rdst); \ + MOVBU (offset+2)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+2)(Rdst); \ + MOVBU (offset+3)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+3)(Rdst) + +TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 + // Needs 24 bytes of stack for saved registers and then 88 bytes of + // scratch space after that. We assume that 24 bytes at (R13) have + // already been used: four bytes for the link register saved in the + // prelude of poly1305_auth_armv6, four bytes for saving the value of g + // in that function and 16 bytes of scratch space used around + // poly1305_finish_ext_armv6_skip1. + ADD $24, R13, R12 + MOVM.IB [R4-R8, R14], (R12) + MOVW R0, 88(R13) + MOVW R1, 92(R13) + MOVW R2, 96(R13) + MOVW R1, R14 + MOVW R2, R12 + MOVW 56(R0), R8 + WORD $0xe1180008 // TST R8, R8 not working see issue 5921 + EOR R6, R6, R6 + MOVW.EQ $(1<<24), R6 + MOVW R6, 84(R13) + ADD $116, R13, g + MOVM.IA (R0), [R0-R9] + MOVM.IA [R0-R4], (g) + CMP $16, R12 + BLO poly1305_blocks_armv6_done + +poly1305_blocks_armv6_mainloop: + WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 + BEQ poly1305_blocks_armv6_mainloop_aligned + ADD $100, R13, g + MOVW_UNALIGNED(R14, g, R0, 0) + MOVW_UNALIGNED(R14, g, R0, 4) + MOVW_UNALIGNED(R14, g, R0, 8) + MOVW_UNALIGNED(R14, g, R0, 12) + MOVM.IA (g), [R0-R3] + ADD $16, R14 + B poly1305_blocks_armv6_mainloop_loaded + +poly1305_blocks_armv6_mainloop_aligned: + MOVM.IA.W (R14), [R0-R3] + +poly1305_blocks_armv6_mainloop_loaded: + MOVW R0>>26, g + MOVW R1>>20, R11 + MOVW R2>>14, R12 + MOVW R14, 92(R13) + MOVW R3>>8, R4 + ORR R1<<6, g, g + ORR R2<<12, R11, R11 + ORR R3<<18, R12, R12 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, g, g + MOVW 84(R13), R3 + BIC $0xfc000000, R11, R11 + BIC $0xfc000000, R12, R12 + ADD R0, R5, R5 + ADD g, R6, R6 + ORR R3, R4, R4 + ADD R11, R7, R7 + ADD $116, R13, R14 + ADD R12, R8, R8 + ADD R4, R9, R9 + MOVM.IA (R14), [R0-R4] + MULLU R4, R5, (R11, g) + MULLU R3, R5, (R14, R12) + MULALU R3, R6, (R11, g) + MULALU R2, R6, (R14, R12) + MULALU R2, R7, (R11, g) + MULALU R1, R7, (R14, R12) + ADD R4<<2, R4, R4 + ADD R3<<2, R3, R3 + MULALU R1, R8, (R11, g) + MULALU R0, R8, (R14, R12) + MULALU R0, R9, (R11, g) + MULALU R4, R9, (R14, R12) + MOVW g, 76(R13) + MOVW R11, 80(R13) + MOVW R12, 68(R13) + MOVW R14, 72(R13) + MULLU R2, R5, (R11, g) + MULLU R1, R5, (R14, R12) + MULALU R1, R6, (R11, g) + MULALU R0, R6, (R14, R12) + MULALU R0, R7, (R11, g) + MULALU R4, R7, (R14, R12) + ADD R2<<2, R2, R2 + ADD R1<<2, R1, R1 + MULALU R4, R8, (R11, g) + MULALU R3, R8, (R14, R12) + MULALU R3, R9, (R11, g) + MULALU R2, R9, (R14, R12) + MOVW g, 60(R13) + MOVW R11, 64(R13) + MOVW R12, 52(R13) + MOVW R14, 56(R13) + MULLU R0, R5, (R11, g) + MULALU R4, R6, (R11, g) + MULALU R3, R7, (R11, g) + MULALU R2, R8, (R11, g) + MULALU R1, 
R9, (R11, g) + ADD $52, R13, R0 + MOVM.IA (R0), [R0-R7] + MOVW g>>26, R12 + MOVW R4>>26, R14 + ORR R11<<6, R12, R12 + ORR R5<<6, R14, R14 + BIC $0xfc000000, g, g + BIC $0xfc000000, R4, R4 + ADD.S R12, R0, R0 + ADC $0, R1, R1 + ADD.S R14, R6, R6 + ADC $0, R7, R7 + MOVW R0>>26, R12 + MOVW R6>>26, R14 + ORR R1<<6, R12, R12 + ORR R7<<6, R14, R14 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, R6, R6 + ADD R14<<2, R14, R14 + ADD.S R12, R2, R2 + ADC $0, R3, R3 + ADD R14, g, g + MOVW R2>>26, R12 + MOVW g>>26, R14 + ORR R3<<6, R12, R12 + BIC $0xfc000000, g, R5 + BIC $0xfc000000, R2, R7 + ADD R12, R4, R4 + ADD R14, R0, R0 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R8 + ADD R12, R6, R9 + MOVW 96(R13), R12 + MOVW 92(R13), R14 + MOVW R0, R6 + CMP $32, R12 + SUB $16, R12, R12 + MOVW R12, 96(R13) + BHS poly1305_blocks_armv6_mainloop + +poly1305_blocks_armv6_done: + MOVW 88(R13), R12 + MOVW R5, 20(R12) + MOVW R6, 24(R12) + MOVW R7, 28(R12) + MOVW R8, 32(R12) + MOVW R9, 36(R12) + ADD $48, R13, R0 + MOVM.DA (R0), [R4-R8, R14] + RET + +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst); \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst) + +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) + +// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key) +TEXT ·poly1305_auth_armv6(SB), $196-16 + // The value 196, just above, is the sum of 64 (the size of the context + // structure) and 132 (the amount of stack needed). + // + // At this point, the stack pointer (R13) has been moved down. It + // points to the saved link register and there's 196 bytes of free + // space above it. + // + // The stack for this function looks like: + // + // +--------------------- + // | + // | 64 bytes of context structure + // | + // +--------------------- + // | + // | 112 bytes for poly1305_blocks_armv6 + // | + // +--------------------- + // | 16 bytes of final block, constructed at + // | poly1305_finish_ext_armv6_skip8 + // +--------------------- + // | four bytes of saved 'g' + // +--------------------- + // | lr, saved by prelude <- R13 points here + // +--------------------- + MOVW g, 4(R13) + + MOVW out+0(FP), R4 + MOVW m+4(FP), R5 + MOVW mlen+8(FP), R6 + MOVW key+12(FP), R7 + + ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 + MOVW R7, R1 + + // poly1305_init_ext_armv6 will write to the stack from R13+4, but + // that's ok because none of the other values have been written yet. 
+ BL poly1305_init_ext_armv6<>(SB) + BIC.S $15, R6, R2 + BEQ poly1305_auth_armv6_noblocks + ADD $136, R13, R0 + MOVW R5, R1 + ADD R2, R5, R5 + SUB R2, R6, R6 + BL poly1305_blocks_armv6<>(SB) + +poly1305_auth_armv6_noblocks: + ADD $136, R13, R0 + MOVW R5, R1 + MOVW R6, R2 + MOVW R4, R3 + + MOVW R0, R5 + MOVW R1, R6 + MOVW R2, R7 + MOVW R3, R8 + AND.S R2, R2, R2 + BEQ poly1305_finish_ext_armv6_noremaining + EOR R0, R0 + ADD $8, R13, R9 // 8 = offset to 16 byte scratch space + MOVW R0, (R9) + MOVW R0, 4(R9) + MOVW R0, 8(R9) + MOVW R0, 12(R9) + WORD $0xe3110003 // TST R1, #3 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_aligned + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8 + MOVWP_UNALIGNED(R1, R9, g) + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip8: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4 + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip4: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHUP_UNALIGNED(R1, R9, g) + B poly1305_finish_ext_armv6_skip2 + +poly1305_finish_ext_armv6_aligned: + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8_aligned + MOVM.IA.W (R1), [g-R11] + MOVM.IA.W [g-R11], (R9) + +poly1305_finish_ext_armv6_skip8_aligned: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4_aligned + MOVW.P 4(R1), g + MOVW.P g, 4(R9) + +poly1305_finish_ext_armv6_skip4_aligned: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHU.P 2(R1), g + MOVH.P g, 2(R9) + +poly1305_finish_ext_armv6_skip2: + WORD $0xe3120001 // TST $1, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip1 + MOVBU.P 1(R1), g + MOVBU.P g, 1(R9) + +poly1305_finish_ext_armv6_skip1: + MOVW $1, R11 + MOVBU R11, 0(R9) + MOVW R11, 56(R5) + MOVW R5, R0 + ADD $8, R13, R1 + MOVW $16, R2 + BL poly1305_blocks_armv6<>(SB) + +poly1305_finish_ext_armv6_noremaining: + MOVW 20(R5), R0 + MOVW 24(R5), R1 + MOVW 28(R5), R2 + MOVW 32(R5), R3 + MOVW 36(R5), R4 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R4 + ADD R12<<2, R12, R12 + ADD R12, R0, R0 + MOVW R0>>26, R12 + BIC $0xfc000000, R0, R0 + ADD R12, R1, R1 + MOVW R1>>26, R12 + BIC $0xfc000000, R1, R1 + ADD R12, R2, R2 + MOVW R2>>26, R12 + BIC $0xfc000000, R2, R2 + ADD R12, R3, R3 + MOVW R3>>26, R12 + BIC $0xfc000000, R3, R3 + ADD R12, R4, R4 + ADD $5, R0, R6 + MOVW R6>>26, R12 + BIC $0xfc000000, R6, R6 + ADD R12, R1, R7 + MOVW R7>>26, R12 + BIC $0xfc000000, R7, R7 + ADD R12, R2, g + MOVW g>>26, R12 + BIC $0xfc000000, g, g + ADD R12, R3, R11 + MOVW $-(1<<26), R12 + ADD R11>>26, R12, R12 + BIC $0xfc000000, R11, R11 + ADD R12, R4, R9 + MOVW R9>>31, R12 + SUB $1, R12 + AND R12, R6, R6 + AND R12, R7, R7 + AND R12, g, g + AND R12, R11, R11 + AND R12, R9, R9 + MVN R12, R12 + AND R12, R0, R0 + AND R12, R1, R1 + AND R12, R2, R2 + AND R12, R3, R3 + AND R12, R4, R4 + ORR R6, R0, R0 + ORR R7, R1, R1 + ORR g, R2, R2 + ORR R11, R3, R3 + ORR R9, R4, R4 + ORR R1<<26, R0, R0 + MOVW R1>>6, R1 + ORR R2<<20, R1, R1 + MOVW R2>>12, R2 + ORR R3<<14, R2, R2 + MOVW R3>>18, R3 + ORR R4<<8, R3, R3 + MOVW 40(R5), R6 + MOVW 44(R5), R7 + MOVW 48(R5), g + MOVW 52(R5), R11 + ADD.S R6, R0, R0 + ADC.S R7, R1, R1 + ADC.S g, R2, R2 + ADC.S R11, R3, R3 + MOVM.IA [R0-R3], (R8) + MOVW R5, R12 + EOR R0, R0, R0 + EOR R1, R1, R1 + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + 
EOR R5, R5, R5 + EOR R6, R6, R6 + EOR R7, R7, R7 + MOVM.IA.W [R0-R7], (R12) + MOVM.IA [R0-R7], (R12) + MOVW 4(R13), g + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go new file mode 100644 index 000000000..bab76ef0d --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -0,0 +1,172 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package poly1305 + +import "encoding/binary" + +const ( + msgBlock = uint32(1 << 24) + finalBlock = uint32(0) +) + +// sumGeneric generates an authenticator for msg using a one-time key and +// puts the 16-byte result into out. This is the generic implementation of +// Sum and should be called if no assembly implementation is available. +func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMACGeneric(key) + h.Write(msg) + h.Sum(out) +} + +func newMACGeneric(key *[32]byte) (h macGeneric) { + h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff + h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 + h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff + h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff + h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff + + h.s[0] = binary.LittleEndian.Uint32(key[16:]) + h.s[1] = binary.LittleEndian.Uint32(key[20:]) + h.s[2] = binary.LittleEndian.Uint32(key[24:]) + h.s[3] = binary.LittleEndian.Uint32(key[28:]) + return +} + +type macGeneric struct { + h, r [5]uint32 + s [4]uint32 + + buffer [TagSize]byte + offset int +} + +func (h *macGeneric) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r)) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + updateGeneric(p, msgBlock, &(h.h), &(h.r)) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *macGeneric) Sum(out *[16]byte) { + H, R := h.h, h.r + if h.offset > 0 { + var buffer [TagSize]byte + copy(buffer[:], h.buffer[:h.offset]) + buffer[h.offset] = 1 // invariant: h.offset < TagSize + updateGeneric(buffer[:], finalBlock, &H, &R) + } + finalizeGeneric(out, &H, &(h.s)) +} + +func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] + r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4]) + R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 + + for len(msg) >= TagSize { + // h += msg + h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + 
(uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + + msg = msg[TagSize:] + } + + h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4 +} + +func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) { + h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] + + // h %= p reduction + h2 += h1 >> 26 + h1 &= 0x3ffffff + h3 += h2 >> 26 + h2 &= 0x3ffffff + h4 += h3 >> 26 + h3 &= 0x3ffffff + h0 += 5 * (h4 >> 26) + h4 &= 0x3ffffff + h1 += h0 >> 26 + h0 &= 0x3ffffff + + // h - p + t0 := h0 + 5 + t1 := h1 + (t0 >> 26) + t2 := h2 + (t1 >> 26) + t3 := h3 + (t2 >> 26) + t4 := h4 + (t3 >> 26) - (1 << 26) + t0 &= 0x3ffffff + t1 &= 0x3ffffff + t2 &= 0x3ffffff + t3 &= 0x3ffffff + + // select h if h < p else h - p + t_mask := (t4 >> 31) - 1 + h_mask := ^t_mask + h0 = (h0 & h_mask) | (t0 & t_mask) + h1 = (h1 & h_mask) | (t1 & t_mask) + h2 = (h2 & h_mask) | (t2 & t_mask) + h3 = (h3 & h_mask) | (t3 & t_mask) + h4 = (h4 & h_mask) | (t4 & t_mask) + + // h %= 2^128 + h0 |= h1 << 26 + h1 = ((h1 >> 6) | (h2 << 20)) + h2 = ((h2 >> 12) | (h3 << 14)) + h3 = ((h3 >> 18) | (h4 << 8)) + + // s: the s part of the key + // tag = (h + s) % (2^128) + t := uint64(h0) + uint64(s[0]) + h0 = uint32(t) + t = uint64(h1) + uint64(s[1]) + (t >> 32) + h1 = uint32(t) + t = uint64(h2) + uint64(s[2]) + (t >> 32) + h2 = uint32(t) + t = uint64(h3) + uint64(s[3]) + (t >> 32) + h3 = uint32(t) + + binary.LittleEndian.PutUint32(out[0:], h0) + binary.LittleEndian.PutUint32(out[4:], h1) + binary.LittleEndian.PutUint32(out[8:], h2) + binary.LittleEndian.PutUint32(out[12:], h3) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go new file mode 100644 index 000000000..8a9c2070b --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl + +package poly1305 + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { + h := newMAC(key) + h.Write(msg) + h.Sum(out) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go new file mode 100644 index 000000000..2402b6371 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -0,0 +1,68 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +package poly1305 + +//go:noescape +func initialize(state *[7]uint64, key *[32]byte) + +//go:noescape +func update(state *[7]uint64, msg []byte) + +//go:noescape +func finalize(tag *[TagSize]byte, state *[7]uint64) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. 
Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := newMAC(key) + h.Write(m) + h.Sum(out) +} + +func newMAC(key *[32]byte) (h mac) { + initialize(&h.state, key) + return +} + +type mac struct { + state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } + + buffer [TagSize]byte + offset int +} + +func (h *mac) Write(p []byte) (n int, err error) { + n = len(p) + if h.offset > 0 { + remaining := TagSize - h.offset + if n < remaining { + h.offset += copy(h.buffer[h.offset:], p) + return n, nil + } + copy(h.buffer[h.offset:], p[:remaining]) + p = p[remaining:] + h.offset = 0 + update(&h.state, h.buffer[:]) + } + if nn := len(p) - (len(p) % TagSize); nn > 0 { + update(&h.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + h.offset += copy(h.buffer[h.offset:], p) + } + return n, nil +} + +func (h *mac) Sum(out *[16]byte) { + state := h.state + if h.offset > 0 { + update(&state, h.buffer[:h.offset]) + } + finalize(out, &state) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s new file mode 100644 index 000000000..55c7167ec --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -0,0 +1,247 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ppc64le,!gccgo,!appengine + +#include "textflag.h" + +// This was ported from the amd64 implementation. + +#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ + MOVD (msg), t0; \ + MOVD 8(msg), t1; \ + MOVD $1, t2; \ + ADDC t0, h0, h0; \ + ADDE t1, h1, h1; \ + ADDE t2, h2; \ + ADD $16, msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ + MULLD r0, h0, t0; \ + MULLD r0, h1, t4; \ + MULHDU r0, h0, t1; \ + MULHDU r0, h1, t5; \ + ADDC t4, t1, t1; \ + MULLD r0, h2, t2; \ + ADDZE t5; \ + MULHDU r1, h0, t4; \ + MULLD r1, h0, h0; \ + ADD t5, t2, t2; \ + ADDC h0, t1, t1; \ + MULLD h2, r1, t3; \ + ADDZE t4, h0; \ + MULHDU r1, h1, t5; \ + MULLD r1, h1, t4; \ + ADDC t4, t2, t2; \ + ADDE t5, t3, t3; \ + ADDC h0, t2, t2; \ + MOVD $-4, t4; \ + MOVD t0, h0; \ + MOVD t1, h1; \ + ADDZE t3; \ + ANDCC $3, t2, h2; \ + AND t2, t4, t0; \ + ADDC t0, h0, h0; \ + ADDE t3, h1, h1; \ + SLD $62, t3, t4; \ + SRD $2, t2; \ + ADDZE h2; \ + OR t4, t2, t2; \ + SRD $2, t3; \ + ADDC t2, h0, h0; \ + ADDE t3, h1, h1; \ + ADDZE h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func update(state *[7]uint64, msg []byte) + +TEXT ·update(SB), $0-32 + MOVD state+0(FP), R3 + MOVD msg_base+8(FP), R4 + MOVD msg_len+16(FP), R5 + + MOVD 0(R3), R8 // h0 + MOVD 8(R3), R9 // h1 + MOVD 16(R3), R10 // h2 + MOVD 24(R3), R11 // r0 + MOVD 32(R3), R12 // r1 + + CMP R5, $16 + BLT bytes_between_0_and_15 + +loop: + POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) + ADD $-16, R5 + CMP R5, $16 + BGE loop + +bytes_between_0_and_15: + CMP $0, R5 + BEQ done + MOVD $0, R16 // h0 + MOVD $0, R17 // h1 + +flush_buffer: + CMP R5, $8 + BLE just1 + + MOVD $8, R21 + SUB R21, R5, R21 + + // Greater than 8 -- load the rightmost remaining bytes in msg + // and put into R17 (h1) + MOVD (R4)(R21), R17 + MOVD $16, R22 + + // Find the offset to those bytes + SUB R5, R22, R22 + SLD $3, R22 + + // Shift to get 
only the bytes in msg + SRD R22, R17, R17 + + // Put 1 at high end + MOVD $1, R23 + SLD $3, R21 + SLD R21, R23, R23 + OR R23, R17, R17 + + // Remainder is 8 + MOVD $8, R5 + +just1: + CMP R5, $8 + BLT less8 + + // Exactly 8 + MOVD (R4), R16 + + CMP $0, R17 + + // Check if we've already set R17; if not + // set 1 to indicate end of msg. + BNE carry + MOVD $1, R17 + BR carry + +less8: + MOVD $0, R16 // h0 + MOVD $0, R22 // shift count + CMP R5, $4 + BLT less4 + MOVWZ (R4), R16 + ADD $4, R4 + ADD $-4, R5 + MOVD $32, R22 + +less4: + CMP R5, $2 + BLT less2 + MOVHZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $16, R22 + ADD $-2, R5 + ADD $2, R4 + +less2: + CMP $0, R5 + BEQ insert1 + MOVBZ (R4), R21 + SLD R22, R21, R21 + OR R16, R21, R16 + ADD $8, R22 + +insert1: + // Insert 1 at end of msg + MOVD $1, R21 + SLD R22, R21, R21 + OR R16, R21, R16 + +carry: + // Add new values to h0, h1, h2 + ADDC R16, R8 + ADDE R17, R9 + ADDE $0, R10 + MOVD $16, R5 + ADD R5, R4 + BR multiply + +done: + // Save h0, h1, h2 in state + MOVD R8, 0(R3) + MOVD R9, 8(R3) + MOVD R10, 16(R3) + RET + +// func initialize(state *[7]uint64, key *[32]byte) +TEXT ·initialize(SB), $0-16 + MOVD state+0(FP), R3 + MOVD key+8(FP), R4 + + // state[0...7] is initialized with zero + // Load key + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + MOVD 24(R4), R8 + + // Address of key mask + MOVD $·poly1305Mask<>(SB), R9 + + // Save original key in state + MOVD R7, 40(R3) + MOVD R8, 48(R3) + + // Get mask + MOVD (R9), R7 + MOVD 8(R9), R8 + + // And with key + AND R5, R7, R5 + AND R6, R8, R6 + + // Save masked key in state + MOVD R5, 24(R3) + MOVD R6, 32(R3) + RET + +// func finalize(tag *[TagSize]byte, state *[7]uint64) +TEXT ·finalize(SB), $0-16 + MOVD tag+0(FP), R3 + MOVD state+8(FP), R4 + + // Get h0, h1, h2 from state + MOVD 0(R4), R5 + MOVD 8(R4), R6 + MOVD 16(R4), R7 + + // Save h0, h1 + MOVD R5, R8 + MOVD R6, R9 + MOVD $3, R20 + MOVD $-1, R21 + SUBC $-5, R5 + SUBE R21, R6 + SUBE R20, R7 + MOVD $0, R21 + SUBZE R21 + + // Check for carry + CMP $0, R21 + ISEL $2, R5, R8, R5 + ISEL $2, R6, R9, R6 + MOVD 40(R4), R8 + MOVD 48(R4), R9 + ADDC R8, R5 + ADDE R9, R6 + MOVD R5, 0(R3) + MOVD R6, 8(R3) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go new file mode 100644 index 000000000..ec99e07e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +package poly1305 + +import ( + "golang.org/x/sys/cpu" +) + +// poly1305vx is an assembly implementation of Poly1305 that uses vector +// instructions. It must only be called if the vector facility (vx) is +// available. +//go:noescape +func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// poly1305vmsl is an assembly implementation of Poly1305 that uses vector +// instructions, including VMSL. It must only be called if the vector facility (vx) is +// available and if VMSL is supported. +//go:noescape +func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
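
Regardless of which architecture-specific file the build tags select, callers see the same Sum/Verify API with a fresh 32-byte one-time key per message and a 16-byte tag. A minimal usage sketch (illustrative only, not taken from the vendored sources):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// One-time key: must never be reused for a second message.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("example message")

	var tag [poly1305.TagSize]byte
	poly1305.Sum(&tag, msg, &key)

	// Verify recomputes the tag and compares it in constant time.
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}
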
+func Sum(out *[16]byte, m []byte, key *[32]byte) { + if cpu.S390X.HasVX { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + if cpu.S390X.HasVXE && len(m) > 256 { + poly1305vmsl(out, mPtr, uint64(len(m)), key) + } else { + poly1305vx(out, mPtr, uint64(len(m)), key) + } + } else { + sumGeneric(out, m, key) + } +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s new file mode 100644 index 000000000..ca5a309d8 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -0,0 +1,378 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx). + +// constants +#define MOD26 V0 +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 + +// key (r) +#define R_0 V9 +#define R_1 V10 +#define R_2 V11 +#define R_3 V12 +#define R_4 V13 +#define R5_1 V14 +#define R5_2 V15 +#define R5_3 V16 +#define R5_4 V17 +#define RSAVE_0 R5 +#define RSAVE_1 R6 +#define RSAVE_2 R7 +#define RSAVE_3 R8 +#define RSAVE_4 R9 +#define R5SAVE_1 V28 +#define R5SAVE_2 V29 +#define R5SAVE_3 V30 +#define R5SAVE_4 V31 + +// message block +#define F_0 V18 +#define F_1 V19 +#define F_2 V20 +#define F_3 V21 +#define F_4 V22 + +// accumulator +#define H_0 V23 +#define H_1 V24 +#define H_2 V25 +#define H_3 V26 +#define H_4 V27 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $64 +// MOD26 +DATA ·constants<>+0(SB)/8, $0x3ffffff +DATA ·constants<>+8(SB)/8, $0x3ffffff +// EX0 +DATA ·constants<>+16(SB)/8, $0x0006050403020100 +DATA ·constants<>+24(SB)/8, $0x1016151413121110 +// EX1 +DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 +// EX2 +DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d + +// h = (f*g) % (2**130-5) [partial reduction] +#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ + VMLOF f0, g0, h0 \ + VMLOF f0, g1, h1 \ + VMLOF f0, g2, h2 \ + VMLOF f0, g3, h3 \ + VMLOF f0, g4, h4 \ + VMLOF f1, g54, T_0 \ + VMLOF f1, g0, T_1 \ + VMLOF f1, g1, T_2 \ + VMLOF f1, g2, T_3 \ + VMLOF f1, g3, T_4 \ + VMALOF f2, g53, h0, h0 \ + VMALOF f2, g54, h1, h1 \ + VMALOF f2, g0, h2, h2 \ + VMALOF f2, g1, h3, h3 \ + VMALOF f2, g2, h4, h4 \ + VMALOF f3, g52, T_0, T_0 \ + VMALOF f3, g53, T_1, T_1 \ + VMALOF f3, g54, T_2, T_2 \ + VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g1, T_4, T_4 \ + VMALOF f4, g51, h0, h0 \ + VMALOF f4, g52, h1, h1 \ + VMALOF f4, g53, h2, h2 \ + VMALOF f4, g54, h3, h3 \ + VMALOF f4, g0, h4, h4 \ + VAG T_0, h0, h0 \ + VAG T_1, h1, h1 \ + VAG T_2, h2, h2 \ + VAG T_3, h3, h3 \ + VAG T_4, h4, h4 + +// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 +#define REDUCE(h0, h1, h2, h3, h4) \ + VESRLG $26, h0, T_0 \ + VESRLG $26, h3, T_1 \ + VN MOD26, h0, h0 \ + VN MOD26, h3, h3 \ + VAG T_0, h1, h1 \ + VAG T_1, h4, h4 \ + VESRLG $26, h1, T_2 \ + VESRLG $26, h4, T_3 \ + VN MOD26, h1, h1 \ + VN MOD26, h4, h4 \ + VESLG $2, T_3, T_4 \ + VAG T_3, T_4, T_4 \ 
+ VAG T_2, h2, h2 \ + VAG T_4, h0, h0 \ + VESRLG $26, h2, T_0 \ + VESRLG $26, h0, T_1 \ + VN MOD26, h2, h2 \ + VN MOD26, h0, h0 \ + VAG T_0, h3, h3 \ + VAG T_1, h1, h1 \ + VESRLG $26, h3, T_2 \ + VN MOD26, h3, h3 \ + VAG T_2, h4, h4 + +// expand in0 into d[0] and in1 into d[1] +#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ + VGBM $0x0707, d1 \ // d1=tmp + VPERM in0, in1, EX2, d4 \ + VPERM in0, in1, EX0, d0 \ + VPERM in0, in1, EX1, d2 \ + VN d1, d4, d4 \ + VESRLG $26, d0, d1 \ + VESRLG $30, d2, d3 \ + VESRLG $4, d2, d2 \ + VN MOD26, d0, d0 \ + VN MOD26, d1, d1 \ + VN MOD26, d2, d2 \ + VN MOD26, d3, d3 + +// pack h4:h0 into h1:h0 (no carry) +#define PACK(h0, h1, h2, h3, h4) \ + VESLG $26, h1, h1 \ + VESLG $26, h3, h3 \ + VO h0, h1, h0 \ + VO h2, h3, h2 \ + VESLG $4, h2, h2 \ + VLEIB $7, $48, h1 \ + VSLB h1, h2, h2 \ + VO h0, h2, h0 \ + VLEIB $7, $104, h1 \ + VSLB h1, h4, h3 \ + VO h3, h0, h0 \ + VLEIB $7, $24, h1 \ + VSRLB h1, h4, h1 + +// if h > 2**130-5 then h -= 2**130-5 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ + VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 + +// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vx(SB), $0-32 + // This code processes up to 2 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + + // load MOD26, EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), MOD26, EX2 + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) + + // setup r*5 + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + + // store r (for final block) + VMLOF T_0, R_1, R5SAVE_1 + VMLOF T_0, R_2, R5SAVE_2 + VMLOF T_0, R_3, R5SAVE_3 + VMLOF T_0, R_4, R5SAVE_4 + VLGVG $0, R_0, RSAVE_0 + VLGVG $0, R_1, RSAVE_1 + VLGVG $0, R_2, RSAVE_2 + VLGVG $0, R_3, RSAVE_3 + VLGVG $0, R_4, RSAVE_4 + + // skip r**2 calculation + CMPBLE R3, $16, skip + + // calculate r**2 + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + VLEIG $0, $5, T_0 + VLEIG $1, $5, T_0 + VMLOF T_0, H_1, R5_1 + VMLOF T_0, H_2, R5_2 + VMLOF T_0, H_3, R5_3 + VMLOF T_0, H_4, R5_4 + VLR H_0, R_0 + VLR H_1, R_1 + VLR H_2, R_2 + VLR H_3, R_3 + VLR H_4, R_4 + + // initialize h + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + +loop: + CMPBLE R3, $32, b2 + VLM (R2), T_0, T_1 + SUB $32, R3 + MOVD $32(R2), R2 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + VLEIB $4, $1, F_4 + VLEIB $12, $1, F_4 + +multiply: + VAG H_0, F_0, F_0 + VAG H_1, F_1, F_1 + VAG H_2, F_2, F_2 + VAG H_3, F_3, F_3 + VAG H_4, F_4, F_4 + MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + REDUCE(H_0, H_1, H_2, H_3, H_4) + CMPBNE R3, $0, loop + +finish: + // sum vectors + VZERO T_0 + VSUMQG H_0, T_0, H_0 + VSUMQG H_1, T_0, H_1 + VSUMQG H_2, T_0, H_2 + VSUMQG H_3, T_0, H_3 + VSUMQG H_4, T_0, H_4 + + // h may be >= 2*(2**130-5) so we need to reduce it again + REDUCE(H_0, H_1, H_2, H_3, H_4) + + // carry h1->h4 + VESRLG $26, H_1, T_1 + VN MOD26, H_1, H_1 + VAQ T_1, H_2, H_2 + VESRLG $26, H_2, T_2 + VN MOD26, H_2, H_2 + VAQ T_2, H_3, H_3 + VESRLG 
$26, H_3, T_3 + VN MOD26, H_3, H_3 + VAQ T_3, H_4, H_4 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H_0, H_1, H_2, H_3, H_4) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H_0, H_1, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H_0, H_0 + VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) + VST H_0, (R1) + + RET + +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + SUB $17, R3 + VL (R2), T_0 + VLL R3, 16(R2), T_1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $12, $1, F_4 + VLEIB $4, $1, F_4 + + // setup [r²,r] + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, RSAVE_3, R_3 + VLVGG $1, RSAVE_4, R_4 + VPDI $0, R5_1, R5SAVE_1, R5_1 + VPDI $0, R5_2, R5SAVE_2, R5_2 + VPDI $0, R5_3, R5SAVE_3, R5_3 + VPDI $0, R5_4, R5SAVE_4, R5_4 + + MOVD $0, R3 + BR multiply + +skip: + VZERO H_0 + VZERO H_1 + VZERO H_2 + VZERO H_3 + VZERO H_4 + + CMPBEQ R3, $0, finish + +b1: + // 1 block remaining + SUB $1, R3 + VLL R3, (R2), T_0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, T_0 + VZERO T_1 + EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + CMPBNE R3, $16, 2(PC) + VLEIB $4, $1, F_4 + VLEIG $1, $1, R_0 + VZERO R_1 + VZERO R_2 + VZERO R_3 + VZERO R_4 + VZERO R5_1 + VZERO R5_2 + VZERO R5_3 + VZERO R5_4 + + // setup [r, 1] + VLVGG $0, RSAVE_0, R_0 + VLVGG $0, RSAVE_1, R_1 + VLVGG $0, RSAVE_2, R_2 + VLVGG $0, RSAVE_3, R_3 + VLVGG $0, RSAVE_4, R_4 + VPDI $0, R5SAVE_1, R5_1, R5_1 + VPDI $0, R5SAVE_2, R5_2, R5_2 + VPDI $0, R5SAVE_3, R5_3, R5_3 + VPDI $0, R5SAVE_4, R5_4, R5_4 + + MOVD $0, R3 + BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s new file mode 100644 index 000000000..e60bbc1d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s @@ -0,0 +1,909 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x,go1.11,!gccgo,!appengine + +#include "textflag.h" + +// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. 
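
For reference, every implementation in this package computes the same quantity: clamp r from the first half of the key, then for each 16-byte block m (with a 0x01 byte appended) update h = (h + m) * r mod 2**130-5, and finally output (h + s) mod 2**128 in little-endian order. A math/big sketch of that recurrence follows (illustrative only; the function and variable names are not part of the vendored code):

package poly1305ref

import "math/big"

// leInt reads b as a little-endian unsigned integer.
func leInt(b []byte) *big.Int {
	le := make([]byte, len(b))
	for i := range b {
		le[len(b)-1-i] = b[i]
	}
	return new(big.Int).SetBytes(le)
}

// poly1305Ref computes the tag the slow, obvious way; the assembly below
// computes the same value using 26-bit (vx) or 44-bit (VMSL) limbs.
func poly1305Ref(msg []byte, key *[32]byte) (tag [16]byte) {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2**130 - 5

	// Clamp r as the spec requires (this is what the keyMask constant implements).
	rb := make([]byte, 16)
	copy(rb, key[:16])
	rb[3], rb[7], rb[11], rb[15] = rb[3]&0x0f, rb[7]&0x0f, rb[11]&0x0f, rb[15]&0x0f
	rb[4], rb[8], rb[12] = rb[4]&0xfc, rb[8]&0xfc, rb[12]&0xfc
	r, s := leInt(rb), leInt(key[16:])

	h := new(big.Int)
	for len(msg) > 0 {
		n := 16
		if len(msg) < n {
			n = len(msg)
		}
		block := append(append([]byte{}, msg[:n]...), 1) // 0x01 terminator byte
		h.Mod(h.Mul(h.Add(h, leInt(block)), r), p)
		msg = msg[n:]
	}
	h.Add(h, s) // tag = (h + s) mod 2**128, little-endian

	hb := h.Bytes() // big-endian
	for i := 0; i < 16 && i < len(hb); i++ {
		tag[i] = hb[len(hb)-1-i]
	}
	return tag
}
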
+ +// constants +#define EX0 V1 +#define EX1 V2 +#define EX2 V3 + +// temporaries +#define T_0 V4 +#define T_1 V5 +#define T_2 V6 +#define T_3 V7 +#define T_4 V8 +#define T_5 V9 +#define T_6 V10 +#define T_7 V11 +#define T_8 V12 +#define T_9 V13 +#define T_10 V14 + +// r**2 & r**4 +#define R_0 V15 +#define R_1 V16 +#define R_2 V17 +#define R5_1 V18 +#define R5_2 V19 +// key (r) +#define RSAVE_0 R7 +#define RSAVE_1 R8 +#define RSAVE_2 R9 +#define R5SAVE_1 R10 +#define R5SAVE_2 R11 + +// message block +#define M0 V20 +#define M1 V21 +#define M2 V22 +#define M3 V23 +#define M4 V24 +#define M5 V25 + +// accumulator +#define H0_0 V26 +#define H1_0 V27 +#define H2_0 V28 +#define H0_1 V29 +#define H1_1 V30 +#define H2_1 V31 + +GLOBL ·keyMask<>(SB), RODATA, $16 +DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f +DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f + +GLOBL ·bswapMask<>(SB), RODATA, $16 +DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 +DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 + +GLOBL ·constants<>(SB), RODATA, $48 +// EX0 +DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+8(SB)/8, $0x0000050403020100 +// EX1 +DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+24(SB)/8, $0x00000a0908070605 +// EX2 +DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f +DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b + +GLOBL ·c<>(SB), RODATA, $48 +// EX0 +DATA ·c<>+0(SB)/8, $0x0000050403020100 +DATA ·c<>+8(SB)/8, $0x0000151413121110 +// EX1 +DATA ·c<>+16(SB)/8, $0x00000a0908070605 +DATA ·c<>+24(SB)/8, $0x00001a1918171615 +// EX2 +DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b +DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b + +GLOBL ·reduce<>(SB), RODATA, $32 +// 44 bit +DATA ·reduce<>+0(SB)/8, $0x0 +DATA ·reduce<>+8(SB)/8, $0xfffffffffff +// 42 bit +DATA ·reduce<>+16(SB)/8, $0x0 +DATA ·reduce<>+24(SB)/8, $0x3ffffffffff + +// h = (f*g) % (2**130-5) [partial reduction] +// uses T_0...T_9 temporary registers +// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 +// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 +#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ + \ // Eliminate the dependency for the last 2 VMSLs + VMSLG m02_0, r_2, m4_2, m4_2 \ + VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined + VMSLG m02_0, r_0, m4_0, m4_0 \ + VMSLG m02_1, r5_2, V0, T_0 \ + VMSLG m02_0, r_1, m4_1, m4_1 \ + VMSLG m02_1, r_0, V0, T_1 \ + VMSLG m02_1, r_1, V0, T_2 \ + VMSLG m02_2, r5_1, V0, T_3 \ + VMSLG m02_2, r5_2, V0, T_4 \ + VMSLG m13_0, r_0, m5_0, m5_0 \ + VMSLG m13_1, r5_2, V0, T_5 \ + VMSLG m13_0, r_1, m5_1, m5_1 \ + VMSLG m13_1, r_0, V0, T_6 \ + VMSLG m13_1, r_1, V0, T_7 \ + VMSLG m13_2, r5_1, V0, T_8 \ + VMSLG m13_2, r5_2, V0, T_9 \ + VMSLG m02_2, r_0, m4_2, m4_2 \ + VMSLG m13_2, r_0, m5_2, m5_2 \ + VAQ m4_0, T_0, m02_0 \ + VAQ m4_1, T_1, m02_1 \ + VAQ m5_0, T_5, m13_0 \ + VAQ m5_1, T_6, m13_1 \ + VAQ m02_0, T_3, m02_0 \ + VAQ m02_1, T_4, m02_1 \ + VAQ m13_0, T_8, m13_0 \ + VAQ m13_1, T_9, m13_1 \ + VAQ m4_2, T_2, m02_2 \ + VAQ m5_2, T_7, m13_2 \ + +// SQUARE uses three limbs of r and r_2*5 to output square of r +// uses T_1, T_5 and T_7 temporary registers +// input: r_0, r_1, r_2, r5_2 +// temp: TEMP0, TEMP1, TEMP2 +// output: p0, p1, p2 +#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ + VMSLG r_0, r_0, p0, p0 \ + VMSLG r_1, r5_2, V0, TEMP0 \ + VMSLG r_2, 
r5_2, p1, p1 \ + VMSLG r_0, r_1, V0, TEMP1 \ + VMSLG r_1, r_1, p2, p2 \ + VMSLG r_0, r_2, V0, TEMP2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + VAQ TEMP0, p0, p0 \ + VAQ TEMP1, p1, p1 \ + VAQ TEMP2, p2, p2 \ + +// carry h0->h1->h2->h0 || h3->h4->h5->h3 +// uses T_2, T_4, T_5, T_7, T_8, T_9 +// t6, t7, t8, t9, t10, t11 +// input: h0, h1, h2, h3, h4, h5 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 +// output: h0, h1, h2, h3, h4, h5 +#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ + VLM (R12), t6, t7 \ // 44 and 42 bit clear mask + VLEIB $7, $0x28, t10 \ // 5 byte shift mask + VREPIB $4, t8 \ // 4 bit shift mask + VREPIB $2, t11 \ // 2 bit shift mask + VSRLB t10, h0, t0 \ // h0 byte shift + VSRLB t10, h1, t1 \ // h1 byte shift + VSRLB t10, h2, t2 \ // h2 byte shift + VSRLB t10, h3, t3 \ // h3 byte shift + VSRLB t10, h4, t4 \ // h4 byte shift + VSRLB t10, h5, t5 \ // h5 byte shift + VSRL t8, t0, t0 \ // h0 bit shift + VSRL t8, t1, t1 \ // h2 bit shift + VSRL t11, t2, t2 \ // h2 bit shift + VSRL t8, t3, t3 \ // h3 bit shift + VSRL t8, t4, t4 \ // h4 bit shift + VESLG $2, t2, t9 \ // h2 carry x5 + VSRL t11, t5, t5 \ // h5 bit shift + VN t6, h0, h0 \ // h0 clear carry + VAQ t2, t9, t2 \ // h2 carry x5 + VESLG $2, t5, t9 \ // h5 carry x5 + VN t6, h1, h1 \ // h1 clear carry + VN t7, h2, h2 \ // h2 clear carry + VAQ t5, t9, t5 \ // h5 carry x5 + VN t6, h3, h3 \ // h3 clear carry + VN t6, h4, h4 \ // h4 clear carry + VN t7, h5, h5 \ // h5 clear carry + VAQ t0, h1, h1 \ // h0->h1 + VAQ t3, h4, h4 \ // h3->h4 + VAQ t1, h2, h2 \ // h1->h2 + VAQ t4, h5, h5 \ // h4->h5 + VAQ t2, h0, h0 \ // h2->h0 + VAQ t5, h3, h3 \ // h5->h3 + VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves + VREPG $1, t7, t7 \ + VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] + VSLDB $8, h1, h1, h1 \ + VSLDB $8, h2, h2, h2 \ + VO h0, h3, h3 \ + VO h1, h4, h4 \ + VO h2, h5, h5 \ + VESRLG $44, h3, t0 \ // 44 bit shift right + VESRLG $44, h4, t1 \ + VESRLG $42, h5, t2 \ + VN t6, h3, h3 \ // clear carry bits + VN t6, h4, h4 \ + VN t7, h5, h5 \ + VESLG $2, t2, t9 \ // multiply carry by 5 + VAQ t9, t2, t2 \ + VAQ t0, h4, h4 \ + VAQ t1, h5, h5 \ + VAQ t2, h3, h3 \ + +// carry h0->h1->h2->h0 +// input: h0, h1, h2 +// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 +// output: h0, h1, h2 +#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ + VLEIB $7, $0x28, t3 \ // 5 byte shift mask + VREPIB $4, t4 \ // 4 bit shift mask + VREPIB $2, t7 \ // 2 bit shift mask + VGBM $0x003F, t5 \ // mask to clear carry bits + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VESRLG $4, t5, t5 \ // 44 bit clear mask + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VESRLG $2, t5, t6 \ // 42 bit clear mask + VESLG $2, t2, t8 \ + VAQ t8, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + VSRLB t3, h0, t0 \ + VSRLB t3, h1, t1 \ + VSRLB t3, h2, t2 \ + VSRL t4, t0, t0 \ + VSRL t4, t1, t1 \ + VSRL t7, t2, t2 \ + VN t5, h0, h0 \ + VN t5, h1, h1 \ + VESLG $2, t2, t8 \ + VN t6, h2, h2 \ + VAQ t0, h1, h1 \ + VAQ t8, t2, t2 \ + VAQ t1, h2, h2 \ + VAQ t2, h0, h0 \ + +// expands two message blocks into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in1, in2, d0, d1, d2, d3, d4, d5 +// temp: TEMP0, TEMP1, TEMP2, TEMP3 +// output: d0, d1, d2, d3, d4, d5 +#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, 
TEMP3) \ + VGBM $0xff3f, TEMP0 \ + VGBM $0xff1f, TEMP1 \ + VESLG $4, d1, TEMP2 \ + VESLG $4, d4, TEMP3 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in1, d0, EX0, d0 \ + VPERM in2, d3, EX0, d3 \ + VPERM in1, d2, EX2, d2 \ + VPERM in2, d5, EX2, d5 \ + VPERM in1, TEMP2, EX1, d1 \ + VPERM in2, TEMP3, EX1, d4 \ + VN TEMP0, d0, d0 \ + VN TEMP0, d3, d3 \ + VESRLG $4, d1, d1 \ + VESRLG $4, d4, d4 \ + VN TEMP1, d2, d2 \ + VN TEMP1, d5, d5 \ + VN TEMP0, d1, d1 \ + VN TEMP0, d4, d4 \ + +// expands one message block into the lower halfs of the d registers +// moves the contents of the d registers into upper halfs +// input: in, d0, d1, d2 +// temp: TEMP0, TEMP1, TEMP2 +// output: d0, d1, d2 +#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ + VGBM $0xff3f, TEMP0 \ + VESLG $4, d1, TEMP2 \ + VGBM $0xff1f, TEMP1 \ + VPERM in, d0, EX0, d0 \ + VESRLG $4, TEMP0, TEMP0 \ + VPERM in, d2, EX2, d2 \ + VPERM in, TEMP2, EX1, d1 \ + VN TEMP0, d0, d0 \ + VN TEMP1, d2, d2 \ + VESRLG $4, d1, d1 \ + VN TEMP0, d1, d1 \ + +// pack h2:h0 into h1:h0 (no carry) +// input: h0, h1, h2 +// output: h0, h1, h2 +#define PACK(h0, h1, h2) \ + VMRLG h1, h2, h2 \ // copy h1 to upper half h2 + VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 + VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 + VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 + VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 + VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) + VLEIG $0, $0, h2 \ // clear upper half of h2 + VESRLG $40, h2, h1 \ // h1 now has upper two bits of result + VLEIB $7, $88, h1 \ // for byte shift (11 bytes) + VSLB h1, h2, h2 \ // shift h2 11 bytes to the left + VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 + VLEIG $0, $0, h1 \ // clear upper half of h1 + +// if h > 2**130-5 then h -= 2**130-5 +// input: h0, h1 +// temp: t0, t1, t2 +// output: h0 +#define MOD(h0, h1, t0, t1, t2) \ + VZERO t0 \ + VLEIG $1, $5, t0 \ + VACCQ h0, t0, t1 \ + VAQ h0, t0, t0 \ + VONE t2 \ + VLEIG $1, $-4, t2 \ + VAQ t2, t1, t1 \ + VACCQ h1, t1, t1 \ + VONE t2 \ + VAQ t2, t1, t1 \ + VN h0, t1, t2 \ + VNC t0, t1, t1 \ + VO t1, t2, h0 \ + +// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305vmsl(SB), $0-32 + // This code processes 6 + up to 4 blocks (32 bytes) per iteration + // using the algorithm described in: + // NEON crypto, Daniel J. 
Bernstein & Peter Schwabe + // https://cryptojedi.org/papers/neoncrypto-20120320.pdf + // And as moddified for VMSL as described in + // Accelerating Poly1305 Cryptographic Message Authentication on the z14 + // O'Farrell et al, CASCON 2017, p48-55 + // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht + + LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key + VZERO V0 // c + + // load EX0, EX1 and EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 // c + + // setup r + VL (R4), T_0 + MOVD $·keyMask<>(SB), R6 + VL (R6), T_1 + VN T_0, T_1, T_0 + VZERO T_2 // limbs for r + VZERO T_3 + VZERO T_4 + EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) + + // T_2, T_3, T_4: [0, r] + + // setup r*20 + VLEIG $0, $0, T_0 + VLEIG $1, $20, T_0 // T_0: [0, 20] + VZERO T_5 + VZERO T_6 + VMSLG T_0, T_3, T_5, T_5 + VMSLG T_0, T_4, T_6, T_6 + + // store r for final block in GR + VLGVG $1, T_2, RSAVE_0 // c + VLGVG $1, T_3, RSAVE_1 // c + VLGVG $1, T_4, RSAVE_2 // c + VLGVG $1, T_5, R5SAVE_1 // c + VLGVG $1, T_6, R5SAVE_2 // c + + // initialize h + VZERO H0_0 + VZERO H1_0 + VZERO H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + // initialize pointer for reduce constants + MOVD $·reduce<>(SB), R12 + + // calculate r**2 and 20*(r**2) + VZERO R_0 + VZERO R_1 + VZERO R_2 + SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) + REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) + VZERO R5_1 + VZERO R5_2 + VMSLG T_0, R_1, R5_1, R5_1 + VMSLG T_0, R_2, R5_2, R5_2 + + // skip r**4 calculation if 3 blocks or less + CMPBLE R3, $48, b4 + + // calculate r**4 and 20*(r**4) + VZERO T_8 + VZERO T_9 + VZERO T_10 + SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) + REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) + VZERO T_2 + VZERO T_3 + VMSLG T_0, T_9, T_2, T_2 + VMSLG T_0, T_10, T_3, T_3 + + // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 + VSLDB $8, T_8, T_8, T_8 + VSLDB $8, T_9, T_9, T_9 + VSLDB $8, T_10, T_10, T_10 + VSLDB $8, T_2, T_2, T_2 + VSLDB $8, T_3, T_3, T_3 + + VO T_8, R_0, R_0 + VO T_9, R_1, R_1 + VO T_10, R_2, R_2 + VO T_2, R5_1, R5_1 + VO T_3, R5_2, R5_2 + + CMPBLE R3, $80, load // less than or equal to 5 blocks in message + + // 6(or 5+1) blocks + SUB $81, R3 + VLM (R2), M0, M4 + VLL R3, 80(R2), M5 + ADD $1, R3 + MOVBZ $1, R0 + CMPBGE R3, $16, 2(PC) + VLVGB R3, R0, M5 + MOVD $96(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $2, $1, H2_0 + VLEIB $2, $1, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO T_4 + VZERO T_10 + EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M4 + VLEIB $10, $1, M2 + CMPBLT R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + SUB $16, R3 + CMPBLE R3, $0, square + +load: + // load EX0, EX1 and EX2 + MOVD $·c<>(SB), R5 + VLM (R5), EX0, EX2 + +loop: + CMPBLE R3, $64, add // b4 // last 4 or less blocks left + + // next 4 full blocks + VLM (R2), M2, M5 + SUB $64, R3 + MOVD $64(R2), R2 + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, 
T_2, T_7, T_8, T_9) + + // expacc in-lined to create [m2, m3] limbs + VGBM $0x3f3f, T_0 // 44 bit clear mask + VGBM $0x1f1f, T_1 // 40 bit clear mask + VPERM M2, M3, EX0, T_3 + VESRLG $4, T_0, T_0 // 44 bit clear mask ready + VPERM M2, M3, EX1, T_4 + VPERM M2, M3, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG H0_1, T_3, H0_0 + VMRHG H1_1, T_4, H1_0 + VMRHG H2_1, T_5, H2_0 + VMRLG H0_1, T_3, H0_1 + VMRLG H1_1, T_4, H1_1 + VMRLG H2_1, T_5, H2_1 + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VPERM M4, M5, EX0, T_3 + VPERM M4, M5, EX1, T_4 + VPERM M4, M5, EX2, T_5 + VN T_0, T_3, T_3 + VESRLG $4, T_4, T_4 + VN T_1, T_5, T_5 + VN T_0, T_4, T_4 + VMRHG V0, T_3, M0 + VMRHG V0, T_4, M1 + VMRHG V0, T_5, M2 + VMRLG V0, T_3, M3 + VMRLG V0, T_4, M4 + VMRLG V0, T_5, M5 + VLEIB $10, $1, M2 + VLEIB $10, $1, M5 + + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + CMPBNE R3, $0, loop + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + // sum vectors + VAQ H0_0, H0_1, H0_0 + VAQ H1_0, H1_1, H1_0 + VAQ H2_0, H2_1, H2_0 + + // h may be >= 2*(2**130-5) so we need to reduce it again + // M0...M4 are used as temps here + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + +next: // carry h1->h2 + VLEIB $7, $0x28, T_1 + VREPIB $4, T_2 + VGBM $0x003F, T_3 + VESRLG $4, T_3 + + // byte shift + VSRLB T_1, H1_0, T_4 + + // bit shift + VSRL T_2, T_4, T_4 + + // clear h1 carry bits + VN T_3, H1_0, H1_0 + + // add carry + VAQ T_4, H2_0, H2_0 + + // h is now < 2*(2**130-5) + // pack h into h1 (hi) and h0 (lo) + PACK(H0_0, H1_0, H2_0) + + // if h > 2**130-5 then h -= 2**130-5 + MOD(H0_0, H1_0, T_0, T_1, T_2) + + // h += s + MOVD $·bswapMask<>(SB), R5 + VL (R5), T_1 + VL 16(R4), T_0 + VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) + VAQ T_0, H0_0, H0_0 + VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) + VST H0_0, (R1) + RET + +add: + // load EX0, EX1, EX2 + MOVD $·constants<>(SB), R5 + VLM (R5), EX0, EX2 + + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + CMPBLE R3, $64, b4 + +b4: + CMPBLE R3, $48, b3 // 3 blocks or less + + // 4(3+1) blocks remaining + SUB $49, R3 + VLM (R2), M0, M2 + VLL R3, 48(R2), M3 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M3 + MOVD $64(R2), R2 + EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) + VLEIB $10, $1, H2_0 + VLEIB $10, $1, H2_1 + VZERO M0 + VZERO M1 + VZERO M4 + VZERO M5 + VZERO T_4 + VZERO T_10 + EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) + VLR T_4, M2 + VLEIB $10, $1, M4 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_10 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + 
SUB $16, R3 + CMPBLE R3, $0, square // this condition must always hold true! + +b3: + CMPBLE R3, $32, b2 + + // 3 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) + + SUB $33, R3 + VLM (R2), M0, M1 + VLL R3, 32(R2), M2 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M2 + + // H += m0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAG H0_0, T_1, H0_0 + VAG H1_0, T_2, H1_0 + VAG H2_0, T_3, H2_0 + + VZERO M0 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + + // (H+m0)*r + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) + + // H += m1 + VZERO V0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) + VLEIB $10, $1, T_3 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + + // [H, m2] * [r**2, r] + EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, H2_0 + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) + SUB $16, R3 + CMPBLE R3, $0, next // this condition must always hold true! 
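	// The remaining-length cases below follow the same pattern as b4 and b3
	// above: b2 handles 17-32 leftover bytes (two blocks) and b1 handles
	// 1-16 (one block). A 0x01 byte is placed after the last byte of a
	// partial block, while an exact 16-byte block gets its 1 bit set in the
	// expanded limbs (the VLEIB cases) instead.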
+ +b2: + CMPBLE R3, $16, b1 + + // 2 blocks remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) + VMRHG V0, H0_1, H0_0 + VMRHG V0, H1_1, H1_0 + VMRHG V0, H2_1, H2_0 + VMRLG V0, H0_1, H0_1 + VMRLG V0, H1_1, H1_1 + VMRLG V0, H2_1, H2_1 + + // move h to the left and 0s at the right + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + + // get message blocks and append 1 to start + SUB $17, R3 + VL (R2), M0 + VLL R3, 16(R2), M1 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M1 + VZERO T_6 + VZERO T_7 + VZERO T_8 + EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) + EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) + VLEIB $2, $1, T_8 + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_8 + + // add [m0, m1] to h + VAG H0_0, T_6, H0_0 + VAG H1_0, T_7, H1_0 + VAG H2_0, T_8, H2_0 + + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + VZERO T_10 + VZERO M0 + + // at this point R_0 .. R5_2 look like [r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) + SUB $16, R3, R3 + CMPBLE R3, $0, next + +b1: + CMPBLE R3, $0, next + + // 1 block remaining + + // setup [r²,r] + VSLDB $8, R_0, R_0, R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // H*[r**2, r] + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + // set up [0, m0] limbs + SUB $1, R3 + VLL R3, (R2), M0 + ADD $1, R3 + MOVBZ $1, R0 + CMPBEQ R3, $16, 2(PC) + VLVGB R3, R0, M0 + VZERO T_1 + VZERO T_2 + VZERO T_3 + EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] + CMPBNE R3, $16, 2(PC) + VLEIB $10, $1, T_3 + + // h+m0 + VAQ H0_0, T_1, H0_0 + VAQ H1_0, T_2, H1_0 + VAQ H2_0, T_3, H2_0 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + + BR next + +square: + // setup [r²,r] + VSLDB $8, R_0, R_0, 
R_0 + VSLDB $8, R_1, R_1, R_1 + VSLDB $8, R_2, R_2, R_2 + VSLDB $8, R5_1, R5_1, R5_1 + VSLDB $8, R5_2, R5_2, R5_2 + + VLVGG $1, RSAVE_0, R_0 + VLVGG $1, RSAVE_1, R_1 + VLVGG $1, RSAVE_2, R_2 + VLVGG $1, R5SAVE_1, R5_1 + VLVGG $1, R5SAVE_2, R5_2 + + // setup [h0, h1] + VSLDB $8, H0_0, H0_0, H0_0 + VSLDB $8, H1_0, H1_0, H1_0 + VSLDB $8, H2_0, H2_0, H2_0 + VO H0_1, H0_0, H0_0 + VO H1_1, H1_0, H1_0 + VO H2_1, H2_0, H2_0 + VZERO H0_1 + VZERO H1_1 + VZERO H2_1 + + VZERO M0 + VZERO M1 + VZERO M2 + VZERO M3 + VZERO M4 + VZERO M5 + + // (h0*r**2) + (h1*r) + MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) + REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) + BR next diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 000000000..1ab07d078 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive io.EOF. +func (b *buffer) eof() { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 000000000..00ed9923e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,535 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// These constants from [PROTOCOL.certkeys] represent the algorithm names +// for certificate types supported by this package. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. +type Signature struct { + Format string + Blob []byte +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the +// PublicKey interface, so it can be unmarshaled using +// ParsePublicKey. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) + } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) + } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. 
+ if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +type algorithmOpenSSHCertSigner struct { + *openSSHCertSigner + algorithmSigner AlgorithmSigner +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + return &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, algorithmSigner}, nil + } else { + return &openSSHCertSigner{cert, signer}, nil + } +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. 
These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsUserAuthority should return true if the key is recognized as an + // authority for the given user certificate. This allows for + // certificates to be signed by other certificates. This must be set + // if this CertChecker will be checking user certificates. + IsUserAuthority func(auth PublicKey) bool + + // IsHostAuthority should report whether the key is recognized as + // an authority for this host. This allows for certificates to be + // signed by other keys, and for those other keys to only be valid + // signers for particular hostnames. This must be set if this + // CertChecker will be checking host certificates. + IsHostAuthority func(auth PublicKey, address string) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback HostKeyCallback + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + if !c.IsHostAuthority(cert.SignatureKey, addr) { + return fmt.Errorf("ssh: no authorities for hostname: %v", addr) + } + + hostname, _, err := net.SplitHostPort(addr) + if err != nil { + return err + } + + // Pass hostname only as principal for host certificates (consistent with OpenSSH) + return c.CheckCert(hostname, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + if !c.IsUserAuthority(cert.SignatureKey) { + return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. 
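
A minimal wiring sketch for the callbacks described above (illustrative only; caPub and the user name are placeholders): the same CertChecker can act as a client's HostKeyCallback and a server's PublicKeyCallback.

package certsketch

import (
	"bytes"

	"golang.org/x/crypto/ssh"
)

// buildConfigs wires one CertChecker into both a client and a server
// configuration. caPub is the public key of the certificate authority.
func buildConfigs(caPub ssh.PublicKey) (*ssh.ClientConfig, *ssh.ServerConfig) {
	checker := &ssh.CertChecker{
		IsHostAuthority: func(auth ssh.PublicKey, addr string) bool {
			return bytes.Equal(auth.Marshal(), caPub.Marshal())
		},
		IsUserAuthority: func(auth ssh.PublicKey) bool {
			return bytes.Equal(auth.Marshal(), caPub.Marshal())
		},
	}

	clientCfg := &ssh.ClientConfig{
		User:            "example-user",
		HostKeyCallback: checker.CheckHostKey, // verifies host certificates
	}
	serverCfg := &ssh.ServerConfig{
		PublicKeyCallback: checker.Authenticate, // verifies user certificates
	}
	return clientCfg, serverCfg
}
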
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) + } + + for opt := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. + found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. 
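
SignCert is the authority-side counterpart of CheckCert. A short minting sketch (field values are placeholders, not taken from the vendored code):

package certsketch

import (
	"crypto/rand"
	"time"

	"golang.org/x/crypto/ssh"
)

// signUserCert mints a user certificate for pub, signed by the CA signer ca.
func signUserCert(ca ssh.Signer, pub ssh.PublicKey) (*ssh.Certificate, error) {
	now := time.Now()
	cert := &ssh.Certificate{
		Key:             pub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "example-key-id",
		ValidPrincipals: []string{"example-user"},
		ValidAfter:      uint64(now.Add(-time.Minute).Unix()),
		ValidBefore:     uint64(now.Add(24 * time.Hour).Unix()),
		Permissions: ssh.Permissions{
			Extensions: map[string]string{"permit-pty": ""},
		},
	}
	if err := cert.SignCert(rand.Reader, ca); err != nil {
		return nil, err
	}
	return cert, nil
}
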
+func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. +func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 000000000..c0834c00d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. 
+ ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). 
+ decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. +func (ch *channel) writePacket(packet []byte) error { + ch.writeMu.Lock() + if ch.sentClose { + ch.writeMu.Unlock() + return io.EOF + } + ch.sentClose = (packet[0] == msgChannelClose) + err := ch.mux.conn.writePacket(packet) + ch.writeMu.Unlock() + return err +} + +func (ch *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], ch.remoteId) + return ch.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if ch.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + ch.writeMu.Lock() + packet := ch.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. 
+ ch.writeMu.Unlock() + + for len(data) > 0 { + space := min(ch.maxRemotePayload, len(data)) + if space, err = ch.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], ch.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = ch.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + ch.writeMu.Lock() + ch.packetPool[extendedCode] = packet + ch.writeMu.Unlock() + + return n, err +} + +func (ch *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > ch.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + ch.windowMu.Lock() + if ch.myWindow < length { + ch.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + ch.myWindow -= length + ch.windowMu.Unlock() + + if extended == 1 { + ch.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + ch.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(n uint32) error { + c.windowMu.Lock() + // Since myWindow is managed on our side, and can never exceed + // the initial window setting, we don't worry about overflow. + c.myWindow += uint32(n) + c.windowMu.Unlock() + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: uint32(n), + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. 
+func (ch *channel) responseMessageReceived() error { + if ch.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if ch.decided { + return errors.New("ssh: duplicate response received for channel") + } + ch.decided = true + return nil +} + +func (ch *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return ch.handleData(packet) + case msgChannelClose: + ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) + ch.mux.chanList.remove(ch.localId) + ch.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. + ch.extPending.eof() + ch.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + ch.mux.chanList.remove(msg.PeersID) + ch.msg <- msg + case *channelOpenConfirmMsg: + if err := ch.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + ch.remoteId = msg.MyID + ch.maxRemotePayload = msg.MaxPacketSize + ch.remoteWin.add(msg.MyWindow) + ch.msg <- msg + case *windowAdjustMsg: + if !ch.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: ch, + } + + ch.incomingRequests <- &req + default: + ch.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (ch *channel) Accept() (Channel, <-chan *Request, error) { + if ch.decided { + return nil, nil, errDecidedAlready + } + ch.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersID: ch.remoteId, + MyID: ch.localId, + MyWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + } + ch.decided = true + if err := ch.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersID: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, 
error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersID: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersID: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. +func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersID: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersID: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersID: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 000000000..a65a923be --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,770 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "math/bits" + + "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/poly1305" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. 
It is used +// by the transport before the first key-exchange. +type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type cipherMode struct { + keySize int + ivSize int + create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) +} + +func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + stream, err := createFunc(key, iv) + if err != nil { + return nil, err + } + + var streamDump []byte + if skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + mac := macModes[algs.MAC].new(macKey) + return &streamPacketCipher{ + mac: mac, + etm: macModes[algs.MAC].etm, + macResult: make([]byte, mac.Size()), + cipher: stream, + }, nil + } +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*cipherMode{ + // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, + + // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, + "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, streamCipherMode(0, newRC4)}, + + // AEAD ciphers + gcmCipherID: {16, 12, newGCMCipher}, + chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, + + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, + + // 3des-cbc is insecure and is not included in the default + // config. + tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. 
+type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. + prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readCipherPacket reads and decrypt a single packet from the reader argument. +func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writeCipherPacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + 
// and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil { + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. 
+ seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, key, iv, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readCipherPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. 
+ if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) + if err != nil { + return nil, err + } + c.oracleCamouflage -= uint32(n) + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. 
+ c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} + +const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + +// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com +// AEAD, which is described here: +// +// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 +// +// the methods here also implement padding, which RFC4253 Section 6 +// also requires of stream ciphers. +type chacha20Poly1305Cipher struct { + lengthKey [8]uint32 + contentKey [8]uint32 + buf []byte +} + +func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { + if len(key) != 64 { + panic(len(key)) + } + + c := &chacha20Poly1305Cipher{ + buf: make([]byte, 256), + } + + for i := range c.contentKey { + c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) + } + for i := range c.lengthKey { + c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) + } + return c, nil +} + +func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) + var polyKey [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes + + encryptedLength := c.buf[:4] + if _, err := io.ReadFull(r, encryptedLength); err != nil { + return nil, err + } + + var lenBytes [4]byte + chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) + + length := binary.BigEndian.Uint32(lenBytes[:]) + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + contentEnd := 4 + length + packetEnd := contentEnd + poly1305.TagSize + if uint32(cap(c.buf)) < packetEnd { + c.buf = make([]byte, packetEnd) + copy(c.buf[:], encryptedLength) + } else { + c.buf = c.buf[:packetEnd] + } + + if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { + return nil, err + } + + var mac [poly1305.TagSize]byte + copy(mac[:], c.buf[contentEnd:packetEnd]) + if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { + return nil, errors.New("ssh: MAC failure") + } + + plain := c.buf[4:contentEnd] + s.XORKeyStream(plain, plain) + + padding := plain[0] + if padding < 4 { + // padding is a byte, so it automatically satisfies + // the maximum size, which is 255. + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding)+1 >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + + plain = plain[1 : len(plain)-int(padding)] + + return plain, nil +} + +func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) + var polyKey [32]byte + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes + + // There is no blocksize, so fall back to multiple of 8 byte + // padding, as described in RFC 4253, Sec 6. + const packetSizeMultiple = 8 + + padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple + if padding < 4 { + padding += packetSizeMultiple + } + + // size (4 bytes), padding (1), payload, padding, tag. 
+ totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize + if cap(c.buf) < totalLength { + c.buf = make([]byte, totalLength) + } else { + c.buf = c.buf[:totalLength] + } + + binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) + chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) + c.buf[4] = byte(padding) + copy(c.buf[5:], payload) + packetEnd := 5 + len(payload) + padding + if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { + return err + } + + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) + + var mac [poly1305.TagSize]byte + poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) + + copy(c.buf[packetEnd:], mac[:]) + + if _, err := w.Write(c.buf); err != nil { + return err + } + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 000000000..7b00bff1c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,278 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "net" + "os" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. +type Client struct { + Conn + + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.HostKeyCallback == nil { + c.Close() + return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") + } + + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. 
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key +// exchange. +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. +func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// HostKeyCallback is the function type used for verifying server +// keys. A HostKeyCallback must return nil if the host key is OK, or +// an error to reject it. It receives the hostname as passed to Dial +// or NewClientConn. The remote address is the RemoteAddr of the +// net.Conn underlying the SSH connection. +type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + +// BannerCallback is the function type used for treat the banner sent by +// the server. A BannerCallback receives the message sent by the remote server. +type BannerCallback func(message string) error + +// A ClientConfig structure is used to configure a Client. It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. 
+ User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback is called during the cryptographic + // handshake to validate the server's host key. The client + // configuration must supply this callback for the connection + // to succeed. The functions InsecureIgnoreHostKey or + // FixedHostKey can be used for simplistic host key checks. + HostKeyCallback HostKeyCallback + + // BannerCallback is called during the SSH dance to display a custom + // server's message. The client configuration can supply this callback to + // handle it as wished. The function BannerDisplayStderr can be used for + // simplistic display on Stderr. + BannerCallback BannerCallback + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the key types that the client will + // accept from the server as host key, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from PublicKey.Type method may be used, or + // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} + +// InsecureIgnoreHostKey returns a function that can be used for +// ClientConfig.HostKeyCallback to accept any host key. It should +// not be used for production code. +func InsecureIgnoreHostKey() HostKeyCallback { + return func(hostname string, remote net.Addr, key PublicKey) error { + return nil + } +} + +type fixedHostKey struct { + key PublicKey +} + +func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { + if f.key == nil { + return fmt.Errorf("ssh: required host key was nil") + } + if !bytes.Equal(key.Marshal(), f.key.Marshal()) { + return fmt.Errorf("ssh: host key mismatch") + } + return nil +} + +// FixedHostKey returns a function for use in +// ClientConfig.HostKeyCallback to accept only a specific host key. +func FixedHostKey(key PublicKey) HostKeyCallback { + hk := &fixedHostKey{key} + return hk.check +} + +// BannerDisplayStderr returns a function that can be used for +// ClientConfig.BannerCallback to display banners on os.Stderr. +func BannerDisplayStderr() BannerCallback { + return func(banner string) error { + _, err := os.Stderr.WriteString(banner) + + return err + } +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 000000000..0590070e2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,639 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +type authResult int + +const ( + authFailure authResult = iota + authPartialSuccess + authSuccess +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. 
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok == authSuccess { + // success + return nil + } else if ok == authFailure { + tried[auth.method()] = true + } + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. 
+ if err != nil { + return authFailure, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return authFailure, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. +func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + // Authentication is performed by sending an enquiry to test if a key is + // acceptable to the remote. If the key is acceptable, the client will + // attempt to authenticate with the valid key. If not the client will repeat + // the process with the remaining keys. + + signers, err := cb() + if err != nil { + return authFailure, nil, err + } + var methods []string + for _, signer := range signers { + ok, err := validateKey(signer.PublicKey(), user, c) + if err != nil { + return authFailure, nil, err + } + if !ok { + continue + } + + pub := signer.PublicKey() + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return authFailure, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return authFailure, nil, err + } + var success authResult + success, methods, err = handleAuthResponse(c) + if err != nil { + return authFailure, nil, err + } + + // If authentication succeeds or the list of available methods does not + // contain the "publickey" method, do not attempt to authenticate with any + // other keys. According to RFC 4252 Section 7, the latter can occur when + // additional authentication methods are required. + if success == authSuccess || !containsMethod(methods, cb.method()) { + return success, methods, err + } + } + + return authFailure, methods, nil +} + +func containsMethod(methods []string, method string) bool { + for _, m := range methods { + if m == method { + return true + } + } + + return false +} + +// validateKey validates the key provided is acceptable to the server. 
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: key.Type(), + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + algoname := key.Type() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return false, err + } + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (authResult, []string, error) { + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +func handleBannerResponse(c packetConn, packet []byte) error { + var msg userAuthBannerMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + transport, ok := c.(*handshakeTransport) + if !ok { + return nil + } + + if transport.bannerCallback != nil { + return transport.bannerCallback(msg.Message) + } + + return nil +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the user and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns an AuthMethod using a prompt/response +// sequence controlled by the server. 
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return authFailure, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + if err := handleBannerResponse(c, packet); err != nil { + return authFailure, nil, err + } + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthSuccess: + return authSuccess, nil, nil + default: + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return authFailure, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return authFailure, nil, err + } + + if len(answers) != len(prompts) { + return authFailure, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return authFailure, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok != authFailure || err != nil { // either success, partial success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. 
+// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} + +// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. +// See RFC 4462 section 3 +// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. +// target is the server host you want to log in to. +func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { + if gssAPIClient == nil { + panic("gss-api client must be not nil with enable gssapi-with-mic") + } + return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} +} + +type gssAPIWithMICCallback struct { + gssAPIClient GSSAPIClient + target string +} + +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { + m := &userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: g.method(), + } + // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. + // See RFC 4462 section 3.2. + m.Payload = appendU32(m.Payload, 1) + m.Payload = appendString(m.Payload, string(krb5OID)) + if err := c.writePacket(Marshal(m)); err != nil { + return authFailure, nil, err + } + // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an + // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or + // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. + // See RFC 4462 section 3.3. + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check + // selected mech if it is valid. + packet, err := c.readPacket() + if err != nil { + return authFailure, nil, err + } + userAuthGSSAPIResp := &userAuthGSSAPIResponse{} + if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { + return authFailure, nil, err + } + // Start the loop into the exchange token. + // See RFC 4462 section 3.4. + var token []byte + defer g.gssAPIClient.DeleteSecContext() + for { + // Initiates the establishment of a security context between the application and a remote peer. 
+ nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) + if err != nil { + return authFailure, nil, err + } + if len(nextToken) > 0 { + if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: nextToken, + })); err != nil { + return authFailure, nil, err + } + } + if !needContinue { + break + } + packet, err = c.readPacket() + if err != nil { + return authFailure, nil, err + } + switch packet[0] { + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return authFailure, nil, err + } + if msg.PartialSuccess { + return authPartialSuccess, msg.Methods, nil + } + return authFailure, msg.Methods, nil + case msgUserAuthGSSAPIError: + userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} + if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { + return authFailure, nil, err + } + return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ + "Major Status: %d\n"+ + "Minor Status: %d\n"+ + "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, + userAuthGSSAPIErrorResp.Message) + case msgUserAuthGSSAPIToken: + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return authFailure, nil, err + } + token = userAuthGSSAPITokenReq.Token + } + } + // Binding Encryption Keys. + // See RFC 4462 section 3.5. + micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") + micToken, err := g.gssAPIClient.GetMIC(micField) + if err != nil { + return authFailure, nil, err + } + if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ + MIC: micToken, + })); err != nil { + return authFailure, nil, err + } + return handleAuthResponse(c) +} + +func (g *gssAPIWithMICCallback) method() string { + return "gssapi-with-mic" +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 000000000..e55fe0ad6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,396 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "math" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers lists ciphers we support but might not recommend. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden +// for the server half. 
+var serverForbiddenKexAlgos = map[string]struct{}{ + kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests + kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests +} + +// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported algorithms to their respective +// hashes needed for signature verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertAlgoRSAv01: crypto.SHA1, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. +func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +// directionAlgorithms records algorithm choices in one direction (either read or write) +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. + switch a.Cipher { + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: + return 16 * (1 << 32) + + } + + // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. 
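A brief aside on the numbers used by rekeyBytes here: RFC 4344 recommends rekeying an L-bit block cipher after 2^(L/4) blocks, and AES has a 128-bit block, so the limit works out to 2^32 blocks of 16 bytes each, while everything else falls back to the 1 GiB figure from RFC 4253. A small constant block restating that arithmetic (illustrative only, not part of the package):

// Rekey limits as plain arithmetic.
const (
	aesBlockBytes   = 16                             // AES block size: 128 bits
	aesRekeyBlocks  = 1 << 32                        // 2^(128/4) blocks per RFC 4344
	aesRekeyBytes   = aesBlockBytes * aesRekeyBlocks // 16 * 2^32 bytes (64 GiB), as returned above
	otherRekeyBytes = 1 << 30                        // 1 GiB fallback per RFC 4253
)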
+ return 1 << 30 +} + +type algorithms struct { + kex string + hostKey string + w directionAlgorithms + r directionAlgorithms +} + +func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { + result := &algorithms{} + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + stoc, ctos := &result.w, &result.r + if isClient { + ctos, stoc = stoc, ctos + } + + ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + + ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil +} + +// If rekeythreshold is too small, we can't make any progress sending +// stuff. +const minRekeyThreshold uint64 = 256 + +// Config contains configuration data common to both ServerConfig and +// ClientConfig. +type Config struct { + // Rand provides the source of entropy for cryptographic + // primitives. If Rand is nil, the cryptographic random reader + // in package crypto/rand will be used. + Rand io.Reader + + // The maximum number of bytes sent or received after which a + // new key is negotiated. It must be at least 256. If + // unspecified, a size suitable for the chosen cipher is used. + RekeyThreshold uint64 + + // The allowed key exchanges algorithms. If unspecified then a + // default set of algorithms is used. + KeyExchanges []string + + // The allowed cipher algorithms. If unspecified then a sensible + // default is used. + Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default + // is used. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = preferredCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = supportedKexAlgos + } + + if c.MACs == nil { + c.MACs = supportedMACs + } + + if c.RekeyThreshold == 0 { + // cipher specific default + } else if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } else if c.RekeyThreshold >= math.MaxInt64 { + // Avoid weirdness if somebody uses -1 as a threshold. 
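Since ClientConfig and ServerConfig both embed the Config type documented above, the algorithm lists can be pinned directly when building a client. A hedged sketch, not part of the vendored file (the function name and the particular choices are illustrative; the algorithm strings are taken from the lists earlier in this file):

// pinnedClientConfig shows how the embedded Config fields select the
// negotiated algorithms.
func pinnedClientConfig(user string, auth []ssh.AuthMethod, hostKeyCB ssh.HostKeyCallback) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		Config: ssh.Config{
			KeyExchanges:   []string{"curve25519-sha256@libssh.org", "ecdh-sha2-nistp256"},
			Ciphers:        []string{"aes128-gcm@openssh.com", "chacha20-poly1305@openssh.com", "aes256-ctr"},
			MACs:           []string{"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256"},
			RekeyThreshold: 1 << 30, // rekey after 1 GiB rather than the cipher-specific default
		},
		User:            user,
		Auth:            auth,
		HostKeyCallback: hostKeyCB,
	}
}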
+ c.RekeyThreshold = math.MaxInt64 + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionID, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. +func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go new file mode 100644 index 000000000..fd6b0681b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
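One detail worth calling out from the window type above: add guards against uint32 wraparound with the w.win+win < win comparison, refusing window adjustments that would overflow the counter. A tiny standalone restatement of that idiom (illustrative only, not part of the package):

// Detecting uint32 overflow the same way window.add does: if the sum
// wrapped around, it is smaller than the value that was added.
func addWouldOverflow(cur, extra uint32) bool {
	return cur+extra < extra
}

// For example, addWouldOverflow(0xFFFFFFF0, 0x20) is true, while
// addWouldOverflow(100, 200) is false.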
+ +package ssh + +import ( + "fmt" + "net" +) + +// OpenChannelError is returned if the other side rejects an +// OpenChannel request. +type OpenChannelError struct { + Reason RejectionReason + Message string +} + +func (e *OpenChannelError) Error() string { + return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) +} + +// ConnMetadata holds metadata for the connection. +type ConnMetadata interface { + // User returns the user ID for this connection. + User() string + + // SessionID returns the session hash, also denoted by H. + SessionID() []byte + + // ClientVersion returns the client's version string as hashed + // into the session ID. + ClientVersion() []byte + + // ServerVersion returns the server's version string as hashed + // into the session ID. + ServerVersion() []byte + + // RemoteAddr returns the remote address for this connection. + RemoteAddr() net.Addr + + // LocalAddr returns the local address for this connection. + LocalAddr() net.Addr +} + +// Conn represents an SSH connection for both server and client roles. +// Conn is the basis for implementing an application layer, such +// as ClientConn, which implements the traditional shell access for +// clients. +type Conn interface { + ConnMetadata + + // SendRequest sends a global request, and returns the + // reply. If wantReply is true, it returns the response status + // and payload. See also RFC4254, section 4. + SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) + + // OpenChannel tries to open an channel. If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. +func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. + *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshconn provides net.Conn metadata, but disallows direct reads and +// writes. 
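The Conn interface documented above is what *ssh.Client ultimately exposes. A short client-side sketch of OpenChannel, SendRequest, and DiscardRequests follows; it is illustrative only (the function name is an assumption, and error handling is condensed):

// Sketch: open a session channel on an established *ssh.Client (which
// implements Conn) and service its out-of-band request channel.
func openSession(client *ssh.Client) (ssh.Channel, error) {
	ch, reqs, err := client.OpenChannel("session", nil)
	if err != nil {
		if openErr, ok := err.(*ssh.OpenChannelError); ok {
			log.Printf("rejected: %s (%s)", openErr.Reason, openErr.Message)
		}
		return nil, err
	}
	go ssh.DiscardRequests(reqs) // must be serviced, or the connection hangs

	// A global request with wantReply=true returns the peer's status and payload.
	if ok, _, err := client.SendRequest("keepalive@openssh.com", true, nil); err == nil && !ok {
		log.Print("server declined keepalive request")
	}
	return ch, nil
}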
+type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 000000000..67b7322c0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,21 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 + +This package does not fall under the stability promise of the Go language itself, +so its API may be changed when pressing needs arise. +*/ +package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 000000000..2b10b05a4 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,647 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. +type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. 
+ hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback HostKeyCallback + dialAddress string + remoteAddr net.Addr + + // bannerCallback is non-empty if we are the client and it has been set in + // ClientConfig. In that case it is called during the user authentication + // dance to handle a custom server's message. + bannerCallback BannerCallback + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + t.resetReadThresholds() + t.resetWriteThresholds() + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + t.bannerCallback = config.BannerCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. 
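The bannerCallback field above is populated from ClientConfig.BannerCallback and is invoked during user authentication whenever the server sends a banner. A sketch of surfacing that banner to the user, not part of the vendored file (the function name is an assumption; the other fields are placeholders):

// Sketch: a ClientConfig whose BannerCallback prints the server's
// authentication banner to stderr.
func configWithBanner(user string, auth []ssh.AuthMethod, hostKeyCB ssh.HostKeyCallback) *ssh.ClientConfig {
	return &ssh.ClientConfig{
		User: user,
		Auth: auth,
		BannerCallback: func(message string) error {
			_, err := fmt.Fprint(os.Stderr, message)
			return err
		},
		HostKeyCallback: hostKeyCB,
	}
}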
+func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) resetWriteThresholds() { + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } else { + t.writeBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + + t.resetWriteThresholds() + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. 
This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + go func() { + for init := range t.startKex { + init.done <- t.writeError + } + }() + + // Unblock reader. + t.conn.Close() +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. +const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) resetReadThresholds() { + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } else { + t.readBytesLeft = 1 << 30 + } +} + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.resetReadThresholds() + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. 
+ return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + isClient := len(t.hostKeys) == 0 + if isClient { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. 
+ if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, t.algorithms, &magics) + } else { + result, err = t.client(kex, t.algorithms, &magics) + } + + if err != nil { + return err + } + + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { + return err + } + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 000000000..16072004b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,789 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + + // For the following kex only the client half contains a production + // ready implementation. The server half only consists of a minimal + // implementation to satisfy the automated tests. + kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" + kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. 
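The client half of the handshake above finishes by calling hostKeyCallback, which is simply ClientConfig.HostKeyCallback. Rather than ignoring host keys, a callback can pin one expected key; a sketch, not part of the vendored file (the helper name is an assumption, and the key line would normally come from local configuration):

// Sketch: build a HostKeyCallback that accepts exactly one known host key.
func pinnedHostKeyCallback(authorizedKeyLine []byte) (ssh.HostKeyCallback, error) {
	// authorizedKeyLine is an authorized_keys-format public key line.
	hostPub, _, _, _, err := ssh.ParseAuthorizedKey(authorizedKeyLine)
	if err != nil {
		return nil, err
	}
	return ssh.FixedHostKey(hostPub), nil
}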
+ SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. +type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + ki, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + ki, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, nil +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} + kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + ki := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(ki)) + marshalInt(K, ki) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} + +// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and +// diffie-hellman-group-exchange-sha256 key agreement protocols, +// as described in RFC 4419 +type dhGEXSHA struct { + g, p *big.Int + hashFunc crypto.Hash +} + +const numMRTests = 64 + +const ( + dhGroupExchangeMinimumBits = 2048 + dhGroupExchangePreferredBits = 2048 + dhGroupExchangeMaximumBits = 8192 +) + +func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { + return nil, fmt.Errorf("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil +} + +func (gex *dhGEXSHA) Client(c packetConn, randSource 
io.Reader, magics *handshakeMagics) (*kexResult, error) { + // Send GexRequest + kexDHGexRequest := kexDHGexRequestMsg{ + MinBits: dhGroupExchangeMinimumBits, + PreferedBits: dhGroupExchangePreferredBits, + MaxBits: dhGroupExchangeMaximumBits, + } + if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { + return nil, err + } + + // Receive GexGroup + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexGroup kexDHGexGroupMsg + if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + return nil, err + } + + // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits + if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + } + + gex.p = kexDHGexGroup.P + gex.g = kexDHGexGroup.G + + // Check if p is safe by verifing that p and (p-1)/2 are primes + one := big.NewInt(1) + var pHalf = &big.Int{} + pHalf.Rsh(gex.p, 1) + if !gex.p.ProbablyPrime(numMRTests) || !pHalf.ProbablyPrime(numMRTests) { + return nil, fmt.Errorf("ssh: server provided gex p is not safe") + } + + // Check if g is safe by verifing that g > 1 and g < p - 1 + var pMinusOne = &big.Int{} + pMinusOne.Sub(gex.p, one) + if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + return nil, fmt.Errorf("ssh: server provided gex g is not safe") + } + + // Send GexInit + x, err := rand.Int(randSource, pHalf) + if err != nil { + return nil, err + } + X := new(big.Int).Exp(gex.g, x, gex.p) + kexDHGexInit := kexDHGexInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { + return nil, err + } + + // Receive GexReply + packet, err = c.readPacket() + if err != nil { + return nil, err + } + + var kexDHGexReply kexDHGexReplyMsg + if err = Unmarshal(packet, &kexDHGexReply); err != nil { + return nil, err + } + + kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) + if err != nil { + return nil, err + } + + // Check if k is safe by verifing that k > 1 and k < p - 1 + if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + return nil, fmt.Errorf("ssh: derived k is not safe") + } + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, kexDHGexReply.HostKey) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, gex.p) + writeInt(h, gex.g) + writeInt(h, X) + writeInt(h, kexDHGexReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHGexReply.HostKey, + Signature: kexDHGexReply.Signature, + Hash: gex.hashFunc, + }, nil +} + +// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. +// +// This is a minimal implementation to satisfy the automated tests. 
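Because neither group-exchange algorithm appears in supportedKexAlgos, a client only negotiates them when they are listed explicitly in Config.KeyExchanges. A sketch of opting in before the server half below (cfg is assumed to be an existing *ssh.ClientConfig; the ordering is illustrative):

// Sketch: enable group exchange, which is registered in kexAlgoMap but
// absent from the default key-exchange list.
cfg.KeyExchanges = []string{
	"curve25519-sha256@libssh.org",
	"diffie-hellman-group-exchange-sha256",
}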
+func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + // Receive GexRequest + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHGexRequest kexDHGexRequestMsg + if err = Unmarshal(packet, &kexDHGexRequest); err != nil { + return + } + + // smoosh the user's preferred size into our own limits + if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits + } + if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { + kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits + } + // fix min/max if they're inconsistent. technically, we could just pout + // and hang up, but there's no harm in giving them the benefit of the + // doubt and just picking a bitsize for them. + if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { + kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits + } + if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { + kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits + } + + // Send GexGroup + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + gex.p = p + gex.g = big.NewInt(2) + + kexDHGexGroup := kexDHGexGroupMsg{ + P: gex.p, + G: gex.g, + } + if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + return nil, err + } + + // Receive GexInit + packet, err = c.readPacket() + if err != nil { + return + } + var kexDHGexInit kexDHGexInitMsg + if err = Unmarshal(packet, &kexDHGexInit); err != nil { + return + } + + var pHalf = &big.Int{} + pHalf.Rsh(gex.p, 1) + + y, err := rand.Int(randSource, pHalf) + if err != nil { + return + } + + Y := new(big.Int).Exp(gex.g, y, gex.p) + kInt, err := gex.diffieHellman(kexDHGexInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := gex.hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) + binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) + writeInt(h, gex.p) + writeInt(h, gex.g) + writeInt(h, kexDHGexInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHGexReply := kexDHGexReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHGexReply) + + err = c.writePacket(packet) + + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: gex.hashFunc, + }, err +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 000000000..969804794 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,1100 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" +) + +// These constants represent non-default signature algorithms that are supported +// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See +// [PROTOCOL.agent] section 4.5.1 and +// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 +const ( + SigAlgoRSA = "ssh-rsa" + SigAlgoRSASHA2256 = "rsa-sha2-256" + SigAlgoRSASHA2512 = "rsa-sha2-512" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. 
"cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. + // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKeys parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. 
+ var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. +type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. To unmarshal the returned data, use + // the ParsePublicKey function. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +// A AlgorithmSigner is a Signer that also supports specifying a specific +// algorithm to use for signing. +type AlgorithmSigner interface { + Signer + + // SignWithAlgorithm is like Signer.Sign, but allows specification of a + // non-default signing algorithm. See the SigAlgo* constants in this + // package for signature algorithms supported by this package. Callers may + // pass an empty string for the algorithm in which case the AlgorithmSigner + // will use its default algorithm. 
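
To see how ParseAuthorizedKey, MarshalAuthorizedKey and the PublicKey interface fit together, here is a hypothetical round trip; it also uses NewPublicKey, which appears further down in this file. The key size is arbitrary and the sketch is not part of the vendored sources.

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}

	// One authorized_keys line, e.g. "ssh-rsa AAAA...\n".
	line := ssh.MarshalAuthorizedKey(pub)

	parsed, comment, options, _, err := ssh.ParseAuthorizedKey(line)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Type(), comment, options,
		bytes.Equal(parsed.Marshal(), pub.Marshal())) // same wire encoding
}
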
+ SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + var hash crypto.Hash + switch sig.Format { + case SigAlgoRSA: + hash = crypto.SHA1 + case SigAlgoRSASHA2256: + hash = crypto.SHA256 + case SigAlgoRSASHA2512: + hash = crypto.SHA512 + default: + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := hash.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (k *dsaPublicKey) Type() string { + return "ssh-dss" +} + +func checkDSAParams(param *dsa.Parameters) error { + // SSH specifies FIPS 186-2, which only provided a single size + // (1024 bits) DSA key. FIPS 186-3 allows for larger key + // sizes, which would confuse SSH. + if l := param.P.BitLen(); l != 1024 { + return fmt.Errorf("ssh: unsupported DSA key size %d", l) + } + + return nil +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + param := dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + } + if err := checkDSAParams(¶m); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: param, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
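
Because the RSA Verify method above dispatches on sig.Format, an RSA key can check rsa-sha2-256/512 signatures as well as classic ssh-rsa ones. A rough sketch of producing such a signature through the AlgorithmSigner interface (the key size and payload are arbitrary; not part of the vendored sources):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(priv) // *rsa.PrivateKey is a crypto.Signer
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("example payload")

	// Request a SHA-512 based RSA signature instead of the ssh-rsa default.
	algSigner, ok := signer.(ssh.AlgorithmSigner)
	if !ok {
		log.Fatal("signer does not support algorithm selection")
	}
	sig, err := algSigner.SignWithAlgorithm(rand.Reader, data, ssh.SigAlgoRSASHA2512)
	if err != nil {
		log.Fatal(err)
	}

	// Verify picks SHA-512 from sig.Format ("rsa-sha2-512").
	if err := signer.PublicKey().Verify(data, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified", sig.Format)
}
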
+ if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + return k.SignWithAlgorithm(rand, data, "") +} + +func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != "" && algorithm != k.PublicKey().Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (k *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + k.nistID() +} + +func (k *ecdsaPublicKey) nistID() string { + switch k.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (k ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (k ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(k), + } + return Marshal(&w) +} + +func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + edKey := (ed25519.PublicKey)(k) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
+func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (k *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + k.Type(), + k.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a +// corresponding Signer instance. ECDSA keys must use P-256, P-384 or +// P-521. DSA keys must use parameter size L1024N160. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return newDSAPrivateKey(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { + if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { + return nil, err + } + + return &dsaPrivateKey{key}, nil +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
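
NewSignerFromSigner accepts any crypto.Signer, which is what makes hardware-backed keys usable here; a software *ecdsa.PrivateKey goes through the same path. A minimal sketch (the curve choice is arbitrary but must be one of the supported NIST curves; not part of the vendored sources):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Stand-in for a key held in an HSM or any other crypto.Signer.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	signer, err := ssh.NewSignerFromSigner(priv)
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("example payload")
	sig, err := signer.Sign(rand.Reader, msg) // hashed with SHA-256 for nistp256
	if err != nil {
		log.Fatal(err)
	}
	if err := signer.PublicKey().Verify(msg, sig); err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified", signer.PublicKey().Type()) // "ecdsa-sha2-nistp256"
}
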
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.SignWithAlgorithm(rand, data, "") +} + +func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + var hashFunc crypto.Hash + + if _, ok := s.pubKey.(*rsaPublicKey); ok { + // RSA keys support a few hash functions determined by the requested signature algorithm + switch algorithm { + case "", SigAlgoRSA: + algorithm = SigAlgoRSA + hashFunc = crypto.SHA1 + case SigAlgoRSASHA2256: + hashFunc = crypto.SHA256 + case SigAlgoRSASHA2512: + hashFunc = crypto.SHA512 + default: + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + } else { + // The only supported algorithm for all other key types is the same as the type of the key + if algorithm == "" { + algorithm = s.pubKey.Type() + } else if algorithm != s.pubKey.Type() { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) + } + + switch key := s.pubKey.(type) { + case *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: algorithm, + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. 
+func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private +// key and passphrase. It supports the same keys as +// ParseRawPrivateKeyWithPassphrase. +func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { + key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. +func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + // RFC5208 - https://tools.ietf.org/html/rfc5208 + case "PRIVATE KEY": + return x509.ParsePKCS8PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with +// passphrase from a PEM encoded private key. If wrong passphrase, return +// x509.IncorrectPasswordError. +func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + buf := block.Bytes + + if encryptedBlock(block) { + if x509.IsEncryptedPEMBlock(block) { + var err error + buf, err = x509.DecryptPEMBlock(block, passPhrase) + if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err + } + return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) + } + } + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(buf) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(buf) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(buf) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(buf) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. 
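
ParsePrivateKey is typically the entry point for client authentication: the returned Signer plugs into ClientConfig via the PublicKeys auth method (both defined elsewhere in this package). A hypothetical sketch; the key path, user name and address are placeholders, and the host key callback shown here skips verification, so it should not be used as-is.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := ioutil.ReadFile("/home/example/.ssh/id_rsa") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		// For encrypted PEM keys, ParsePrivateKeyWithPassphrase is the
		// variant documented above.
		log.Fatal(err)
	}

	config := &ssh.ClientConfig{
		User:            "example",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // placeholder; verify host keys in real use
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
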
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { + const magic = "openssh-key-v1\x00" + if len(key) < len(magic) || string(key[:len(magic)]) != magic { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + if w.KdfName != "none" || w.CipherName != "none" { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 and rsa keys currently + switch pk1.Keytype { + case KeyAlgoRSA: + // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 + key := struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: key.N, + E: int(key.E.Int64()), + }, + D: key.D, + Primes: []*big.Int{key.P, key.Q}, + } + + if err := pk.Validate(); err != nil { + return nil, err + } + + pk.Precompute() + + return pk, nil + case KeyAlgoED25519: + key := struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if len(key.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + for i, b := range key.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, key.Priv) + return &pk, nil + default: + return nil, errors.New("ssh: unhandled key type") + } +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. 
+// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 000000000..c07a06285 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 000000000..ac41a4168 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,866 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. 
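
The keys of the macModes table above are the names that can be listed in the MACs field of the shared Config struct (defined elsewhere in this package) to pin which MACs are offered during negotiation. A rough configuration sketch, with authentication and host key checking left to the caller:

package sshexample

import "golang.org/x/crypto/ssh"

// exampleConfig returns a client configuration that only offers the
// SHA-256 based MACs from the table above, preferring the
// encrypt-then-MAC variant. Auth and HostKeyCallback are omitted here
// and must be filled in before use.
func exampleConfig() *ssh.ClientConfig {
	return &ssh.ClientConfig{
		Config: ssh.Config{
			MACs: []string{
				"hmac-sha2-256-etm@openssh.com",
				"hmac-sha2-256",
			},
		},
	}
}
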
+const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4419, section 5. +const msgKexDHGexGroup = 31 + +type kexDHGexGroupMsg struct { + P *big.Int `sshtype:"31"` + G *big.Int +} + +const msgKexDHGexInit = 32 + +type kexDHGexInitMsg struct { + X *big.Int `sshtype:"32"` +} + +const msgKexDHGexReply = 33 + +type kexDHGexReplyMsg struct { + HostKey []byte `sshtype:"33"` + Y *big.Int + Signature []byte +} + +const msgKexDHGexRequest = 34 + +type kexDHGexRequestMsg struct { + MinBits uint32 `sshtype:"34"` + PreferedBits uint32 + MaxBits uint32 +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4252, section 5.1 +const msgUserAuthSuccess = 52 + +// See RFC 4252, section 5.4 +const msgUserAuthBanner = 53 + +type userAuthBannerMsg struct { + Message string `sshtype:"53"` + // unused, but required to allow message parsing + Language string +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersID uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersID uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersID uint32 `sshtype:"91"` + MyID uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. 
+const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersID uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersID uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersID uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. +const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersID uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersID uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersID uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersID uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// See RFC 4462, section 3 +const msgUserAuthGSSAPIResponse = 60 + +type userAuthGSSAPIResponse struct { + SupportMech []byte `sshtype:"60"` +} + +const msgUserAuthGSSAPIToken = 61 + +type userAuthGSSAPIToken struct { + Token []byte `sshtype:"61"` +} + +const msgUserAuthGSSAPIMIC = 66 + +type userAuthGSSAPIMIC struct { + MIC []byte `sshtype:"66"` +} + +// See RFC 4462, section 3.9 +const msgUserAuthGSSAPIErrTok = 64 + +type userAuthGSSAPIErrTok struct { + ErrorToken []byte `sshtype:"64"` +} + +// See RFC 4462, section 3.8 +const msgUserAuthGSSAPIError = 65 + +type userAuthGSSAPIError struct { + MajorStatus uint32 `sshtype:"65"` + MinorStatus uint32 + Message string + LanguageTag string +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. 
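
Marshal and Unmarshal are exported, so the tag conventions described above can be exercised directly. A small sketch with a made-up message type; the type byte 42 and the field layout are purely illustrative and do not correspond to any real SSH message.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// exampleMsg is an illustrative message: one type byte (42), a string,
// a uint32, and then the remainder of the packet.
type exampleMsg struct {
	Name  string `sshtype:"42"`
	Count uint32
	Rest  []byte `ssh:"rest"`
}

func main() {
	packet := ssh.Marshal(&exampleMsg{Name: "demo", Count: 3, Rest: []byte{1, 2, 3}})
	fmt.Printf("type byte %d, wire form % x\n", packet[0], packet)

	var out exampleMsg
	if err := ssh.Unmarshal(packet, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
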
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
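
marshalInt and intLength implement the SSH mpint rules (two's-complement, minimal length, 0x00/0xff padding to preserve the sign). They are unexported, but the behaviour can be observed through Marshal on a struct with a *big.Int field; a small sketch, not part of the vendored sources:

package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/ssh"
)

type mpintOnly struct {
	V *big.Int
}

func main() {
	// 0 encodes as a zero-length string, 128 gains a 0x00 pad byte so it
	// stays positive, and -1 becomes a single 0xff byte.
	for _, v := range []int64{0, 1, 128, -1} {
		wire := ssh.Marshal(&mpintOnly{V: big.NewInt(v)})
		fmt.Printf("%4d -> % x\n", v, wire)
	}
}
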
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + case msgUserAuthGSSAPIToken: + msg = new(userAuthGSSAPIToken) + case msgUserAuthGSSAPIMIC: + msg = new(userAuthGSSAPIMIC) + case msgUserAuthGSSAPIErrTok: + msg = new(userAuthGSSAPIErrTok) + case msgUserAuthGSSAPIError: + msg = new(userAuthGSSAPIError) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +var packetTypeNames = map[byte]string{ + msgDisconnect: "disconnectMsg", + msgServiceRequest: "serviceRequestMsg", + msgServiceAccept: "serviceAcceptMsg", + msgKexInit: "kexInitMsg", + msgKexDHInit: "kexDHInitMsg", + msgKexDHReply: "kexDHReplyMsg", + msgUserAuthRequest: "userAuthRequestMsg", + msgUserAuthSuccess: "userAuthSuccessMsg", + msgUserAuthFailure: "userAuthFailureMsg", + msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", + msgGlobalRequest: "globalRequestMsg", + msgRequestSuccess: "globalRequestSuccessMsg", + msgRequestFailure: "globalRequestFailureMsg", + msgChannelOpen: "channelOpenMsg", + msgChannelData: "channelDataMsg", + msgChannelOpenConfirm: "channelOpenConfirmMsg", + msgChannelOpenFailure: "channelOpenFailureMsg", + msgChannelWindowAdjust: "windowAdjustMsg", + msgChannelEOF: "channelEOFMsg", + msgChannelClose: "channelCloseMsg", + msgChannelRequest: "channelRequestMsg", + msgChannelSuccess: "channelRequestSuccessMsg", + msgChannelFailure: "channelRequestFailureMsg", +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 000000000..f19016270 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. 
+type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. +func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. 
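
From the public API, the global-request plumbing above is driven through Conn.SendRequest on the sending side and the *Request channel returned by NewClientConn/NewServerConn on the receiving side. A rough sketch; the keepalive request name follows OpenSSH's convention, and any unrecognised name simply gets a failure reply, which is enough to probe liveness.

package sshexample

import "golang.org/x/crypto/ssh"

// keepalive sends a global request and waits for the peer's reply, which
// detects a dead connection even though the request itself is expected to
// be answered with a failure.
func keepalive(conn ssh.Conn) error {
	_, _, err := conn.SendRequest("keepalive@openssh.com", true, nil)
	return err
}

// serviceRequests answers incoming global requests; requests with WantReply
// set must be acknowledged (see ackRequest below) or the peer may stall.
func serviceRequests(reqs <-chan *ssh.Request) {
	for req := range reqs {
		if req.WantReply {
			req.Reply(false, nil)
		}
	}
}
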
+func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. +func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. 
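
On the accepting side, a channel scheduled by handleChannelOpen surfaces as a NewChannel value; a typical server loop accepts "session" channels and rejects everything else. A hypothetical sketch (DiscardRequests is a helper defined elsewhere in this package); a real server would serve the session before closing it.

package sshexample

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func handleChannels(chans <-chan ssh.NewChannel) {
	for newChan := range chans {
		if newChan.ChannelType() != "session" {
			newChan.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		ch, reqs, err := newChan.Accept()
		if err != nil {
			log.Printf("accept failed: %v", err)
			continue
		}
		// Out-of-band requests (pty-req, exec, ...) must be drained or answered.
		go ssh.DiscardRequests(reqs)
		ch.Close()
	}
}
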
+func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersID: msg.PeersID, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersID + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersID: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 000000000..7a5a1d7ad --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,716 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a user. +// The Permissions value for a successful authentication attempt is +// available in ServerConn, so it can be used to pass information from +// the user-authentication phase to the application layer. +type Permissions struct { + // CriticalOptions indicate restrictions to the default + // permissions, and are typically used in conjunction with + // user certificates. The standard for SSH certificates + // defines "force-command" (only allow the given command to + // execute) and "source-address" (only allow connections from + // the given address). The SSH package currently only enforces + // the "source-address" critical option. It is up to server + // implementations to enforce other critical options, such as + // "force-command", by checking them after the SSH handshake + // is successful. In general, SSH servers should reject + // connections that specify critical options that are unknown + // or not supported. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Lack of support for an + // extension does not preclude authenticating a user. Common + // extensions are "permit-agent-forwarding", + // "permit-X11-forwarding". The Go SSH library currently does + // not act on any extension, and it is up to server + // implementations to honor them. 
Extensions can be used to + // pass data from the authentication callbacks to the server + // application layer. + Extensions map[string]string +} + +type GSSAPIWithMICConfig struct { + // AllowLogin, must be set, is called when gssapi-with-mic + // authentication is selected (RFC 4462 section 3). The srcName is from the + // results of the GSS-API authentication. The format is username@DOMAIN. + // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. + // This callback is called after the user identity is established with GSSAPI to decide if the user can login with + // which permissions. If the user is allowed to login, it should return a nil error. + AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) + + // Server must be set. It's the implementation + // of the GSSAPIServer interface. See GSSAPIServer interface for details. + Server GSSAPIServer +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // MaxAuthTries specifies the maximum number of authentication attempts + // permitted per connection. If set to a negative number, the number of + // attempts are unlimited. If set to zero, the number of attempts are limited + // to 6. + MaxAuthTries int + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client + // offers a public key for authentication. It must return a nil error + // if the given public key can be used to authenticate the + // given user. For example, see CertChecker.Authenticate. A + // call to this function does not guarantee that the key + // offered is in fact used to authenticate. To record any data + // depending on the public key, store it inside a + // Permissions.Extensions entry. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string + + // BannerCallback, if present, is called and the return string is sent to + // the client after key exchange completed but before authentication. + BannerCallback func(conn ConnMetadata) string + + // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used + // when gssapi-with-mic authentication is selected (RFC 4462 section 3). 
+ GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + if fullConf.MaxAuthTries == 0 { + fullConf.MaxAuthTries = 6 + } + // Check if the config contains any unsupported key exchanges + for _, kex := range fullConf.KeyExchanges { + if _, ok := serverForbiddenKexAlgos[kex]; ok { + return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) + } + } + + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. 
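To tie ServerConfig, AddHostKey and NewServerConn together, here is a minimal, illustrative server loop. The host-key path, credentials, listen address and the "logged-in-as" extension key are placeholders, and error handling is abbreviated.

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net"

    "golang.org/x/crypto/ssh"
)

func main() {
    config := &ssh.ServerConfig{
        // Accept a single hard-coded credential; a real server would
        // consult its own user database here.
        PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
            if c.User() == "demo" && string(pass) == "secret" {
                return &ssh.Permissions{
                    Extensions: map[string]string{"logged-in-as": c.User()},
                }, nil
            }
            return nil, fmt.Errorf("ssh: password rejected for %q", c.User())
        },
    }

    keyBytes, err := ioutil.ReadFile("/path/to/host_key") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    hostKey, err := ssh.ParsePrivateKey(keyBytes)
    if err != nil {
        log.Fatal(err)
    }
    config.AddHostKey(hostKey)

    ln, err := net.Listen("tcp", "127.0.0.1:2022")
    if err != nil {
        log.Fatal(err)
    }
    for {
        nconn, err := ln.Accept()
        if err != nil {
            log.Fatal(err)
        }
        go func(nconn net.Conn) {
            sconn, chans, reqs, err := ssh.NewServerConn(nconn, config)
            if err != nil {
                log.Printf("handshake failed: %v", err)
                return
            }
            defer sconn.Close()
            log.Printf("login from %s (%s)", sconn.User(), sconn.Permissions.Extensions["logged-in-as"])
            // Both channels must be serviced or the connection hangs.
            go ssh.DiscardRequests(reqs)
            for newChan := range chans {
                newChan.Reject(ssh.UnknownChannelType, "channels not supported in this sketch")
            }
        }(nconn)
    }
}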
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && + config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || + config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. + s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, + sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms *Permissions, err error) { + gssAPIServer := gssapiConfig.Server + defer gssAPIServer.DeleteSecContext() + var srcName string + for { + var ( + outToken []byte + needContinue bool + ) + outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) + if err != nil { + return err, nil, nil + } + if len(outToken) != 0 
{ + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ + Token: outToken, + })); err != nil { + return nil, nil, err + } + } + if !needContinue { + break + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, nil, err + } + } + packet, err := s.transport.readPacket() + if err != nil { + return nil, nil, err + } + userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} + if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { + return nil, nil, err + } + mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) + if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { + return err, nil, nil + } + perms, authErr = gssapiConfig.AllowLogin(s, srcName) + return authErr, perms, nil +} + +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. +type ServerAuthError struct { + // Errors contains authentication errors returned by the authentication + // callback methods. The first entry is typically ErrNoAuth. + Errors []error +} + +func (l ServerAuthError) Error() string { + var errs []string + for _, err := range l.Errors { + errs = append(errs, err.Error()) + } + return "[" + strings.Join(errs, ", ") + "]" +} + +// ErrNoAuth is the error value returned if no +// authentication method has been passed yet. This happens as a normal +// part of the authentication loop, since the client first tries +// 'none' authentication to discover available methods. +// It is returned in ServerAuthError.Errors from NewServerConn. 
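On the application side a failed handshake can be unwrapped to see which attempts were made; the client's initial "none" probe shows up as ErrNoAuth and is usually not worth logging. A short sketch, assuming nconn and config come from an accept loop like the one above.

package main

import (
    "errors"
    "log"
    "net"

    "golang.org/x/crypto/ssh"
)

func handshake(nconn net.Conn, config *ssh.ServerConfig) {
    sconn, chans, reqs, err := ssh.NewServerConn(nconn, config)
    if err != nil {
        var authErr *ssh.ServerAuthError
        if errors.As(err, &authErr) {
            for _, e := range authErr.Errors {
                if errors.Is(e, ssh.ErrNoAuth) {
                    continue // the client's initial "none" attempt
                }
                log.Printf("failed auth attempt from %s: %v", nconn.RemoteAddr(), e)
            }
        }
        return
    }
    defer sconn.Close()
    go ssh.DiscardRequests(reqs)
    for newChan := range chans {
        newChan.Reject(ssh.Prohibited, "no channels in this sketch")
    }
}

func main() {} // handshake would be called from an accept loop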
+var ErrNoAuth = errors.New("ssh: no auth passed yet") + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + + authFailures := 0 + var authErrs []error + var displayedBanner bool + +userAuthLoop: + for { + if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { + discMsg := &disconnectMsg{ + Reason: 2, + Message: "too many authentication failures", + } + + if err := s.transport.writePacket(Marshal(discMsg)); err != nil { + return nil, err + } + + return nil, discMsg + } + + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + if err == io.EOF { + return nil, &ServerAuthError{Errors: authErrs} + } + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + + if !displayedBanner && config.BannerCallback != nil { + displayedBanner = true + msg := config.BannerCallback(s) + if msg != "" { + bannerMsg := &userAuthBannerMsg{ + Message: msg, + } + if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + return nil, err + } + } + } + + perms = nil + authErr := ErrNoAuth + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + authErr = nil + } + + // allow initial attempt of 'none' without penalty + if authFailures == 0 { + authFailures-- + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configured") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + case "gssapi-with-mic": + gssapiConfig := config.GSSAPIWithMICConfig + userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) + if err != nil { + return nil, parseError(msgUserAuthRequest) + } + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. + if userAuthRequestGSSAPI.N == 0 { + authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") + break + } + var i uint32 + present := false + for i = 0; i < userAuthRequestGSSAPI.N; i++ { + if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { + present = true + break + } + } + if !present { + authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") + break + } + // Initial server response, see RFC 4462 section 3.3. + if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ + SupportMech: krb5OID, + })); err != nil { + return nil, err + } + // Exchange token, see RFC 4462 section 3.4. 
+ packet, err := s.transport.readPacket() + if err != nil { + return nil, err + } + userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} + if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { + return nil, err + } + authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, + userAuthReq) + if err != nil { + return nil, err + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + authErrs = append(authErrs, authErr) + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + authFailures++ + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && + config.GSSAPIWithMICConfig.AllowLogin != nil { + failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. +type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 000000000..d3321f6b7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,647 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. +type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. 
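The Stdin/Stdout/Stderr fields above are wired up by Run/Start. A client-side sketch showing Setenv together with a captured Stdout; the host, user and password are placeholders, and host-key verification is deliberately disabled only for brevity.

package main

import (
    "bytes"
    "fmt"
    "log"

    "golang.org/x/crypto/ssh"
)

func main() {
    config := &ssh.ClientConfig{
        User:            "demo",
        Auth:            []ssh.AuthMethod{ssh.Password("secret")},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // verify host keys in real code
    }
    client, err := ssh.Dial("tcp", "example.com:22", config)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    sess, err := client.NewSession()
    if err != nil {
        log.Fatal(err)
    }
    defer sess.Close()

    // Many servers only accept env requests they are configured to allow
    // (e.g. OpenSSH's AcceptEnv); otherwise Setenv returns an error.
    if err := sess.Setenv("LANG", "C"); err != nil {
        log.Printf("setenv refused: %v", err)
    }

    var out bytes.Buffer
    sess.Stdout = &out // leaving it nil would discard the output
    if err := sess.Run("/usr/bin/env"); err != nil {
        log.Fatal(err)
    }
    fmt.Print(out.String())
}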
+func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.7. +type ptyWindowChangeMsg struct { + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 +} + +// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. +func (s *Session) WindowChange(h, w int) error { + req := ptyWindowChangeMsg{ + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + } + _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. +func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. 
If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. 
+ if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. +func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. 
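The error values produced by Run/Wait can be told apart by type. A sketch of that classification, assuming a live *ssh.Session obtained as in the earlier dial example (a Session accepts only one Run, so this is a one-shot helper).

package main

import (
    "log"

    "golang.org/x/crypto/ssh"
)

// report runs cmd on an already-established session and classifies the result
// by error type.
func report(sess *ssh.Session, cmd string) {
    err := sess.Run(cmd)
    switch e := err.(type) {
    case nil:
        log.Printf("%q exited with status 0", cmd)
    case *ssh.ExitError:
        // The remote command ran but exited non-zero or was killed by a signal.
        log.Printf("%q failed: status=%d signal=%q reason=%q", cmd, e.ExitStatus(), e.Signal(), e.Msg())
    case *ssh.ExitMissingError:
        // The channel closed cleanly but the server sent neither exit-status
        // nor exit-signal (discouraged by RFC 4254, but allowed).
        log.Printf("%q finished without an exit status: %v", cmd, e)
    default:
        // Copy errors on stdin/stdout/stderr and other I/O failures.
        log.Printf("%q: %v", cmd, e)
    }
}

func main() {} // report would be called with a live *ssh.Session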
+func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go new file mode 100644 index 000000000..24bd7c8e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go @@ -0,0 +1,139 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/asn1" + "errors" +) + +var krb5OID []byte + +func init() { + krb5OID, _ = asn1.Marshal(krb5Mesh) +} + +// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. +type GSSAPIClient interface { + // InitSecContext initiates the establishment of a security context for GSS-API between the + // ssh client and ssh server. Initially the token parameter should be specified as nil. + // The routine may return a outputToken which should be transferred to + // the ssh server, where the ssh server will present it to + // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting + // needContinue to false. To complete the context + // establishment, one or more reply tokens may be required from the ssh + // server;if so, InitSecContext will return a needContinue which is true. + // In this case, InitSecContext should be called again when the + // reply token is received from the ssh server, passing the reply + // token to InitSecContext via the token parameters. + // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. + InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) + // GetMIC generates a cryptographic MIC for the SSH2 message, and places + // the MIC in a token for transfer to the ssh server. 
+ // The contents of the MIC field are obtained by calling GSS_GetMIC() + // over the following, using the GSS-API context that was just + // established: + // string session identifier + // byte SSH_MSG_USERAUTH_REQUEST + // string user name + // string service + // string "gssapi-with-mic" + // See RFC 2743 section 2.3.1 and RFC 4462 3.5. + GetMIC(micFiled []byte) ([]byte, error) + // Whenever possible, it should be possible for + // DeleteSecContext() calls to be successfully processed even + // if other calls cannot succeed, thereby enabling context-related + // resources to be released. + // In addition to deleting established security contexts, + // gss_delete_sec_context must also be able to delete "half-built" + // security contexts resulting from an incomplete sequence of + // InitSecContext()/AcceptSecContext() calls. + // See RFC 2743 section 2.2.3. + DeleteSecContext() error +} + +// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. +type GSSAPIServer interface { + // AcceptSecContext allows a remotely initiated security context between the application + // and a remote peer to be established by the ssh client. The routine may return a + // outputToken which should be transferred to the ssh client, + // where the ssh client will present it to InitSecContext. + // If no token need be sent, AcceptSecContext will indicate this + // by setting the needContinue to false. To + // complete the context establishment, one or more reply tokens may be + // required from the ssh client. if so, AcceptSecContext + // will return a needContinue which is true, in which case it + // should be called again when the reply token is received from the ssh + // client, passing the token to AcceptSecContext via the + // token parameters. + // The srcName return value is the authenticated username. + // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. + AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) + // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, + // fits the supplied message is received from the ssh client. + // See RFC 2743 section 2.3.2. + VerifyMIC(micField []byte, micToken []byte) error + // Whenever possible, it should be possible for + // DeleteSecContext() calls to be successfully processed even + // if other calls cannot succeed, thereby enabling context-related + // resources to be released. + // In addition to deleting established security contexts, + // gss_delete_sec_context must also be able to delete "half-built" + // security contexts resulting from an incomplete sequence of + // InitSecContext()/AcceptSecContext() calls. + // See RFC 2743 section 2.2.3. + DeleteSecContext() error +} + +var ( + // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, + // so we also support the krb5 mechanism only. + // See RFC 1964 section 1. + krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} +) + +// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST +// See RFC 4462 section 3.2. 
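Wiring these interfaces into a server only requires a type that satisfies GSSAPIServer plus an AllowLogin decision. The skeleton below is a non-functional placeholder: a real implementation would delegate to a Kerberos/GSS-API library, and the realm in AllowLogin is an invented example. It only shows the shape expected by GSSAPIWithMICConfig.

package main

import (
    "errors"

    "golang.org/x/crypto/ssh"
)

// stubGSSServer satisfies ssh.GSSAPIServer but performs no real GSS-API
// exchange; every method is a placeholder.
type stubGSSServer struct{}

func (stubGSSServer) AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) {
    // A real implementation would feed token into the GSS-API library and
    // return its output token, the authenticated principal, and whether
    // further round trips are needed.
    return nil, "", false, errors.New("gssapi: not implemented")
}

func (stubGSSServer) VerifyMIC(micField []byte, micToken []byte) error {
    return errors.New("gssapi: not implemented")
}

func (stubGSSServer) DeleteSecContext() error { return nil }

func newGSSConfig() *ssh.ServerConfig {
    return &ssh.ServerConfig{
        GSSAPIWithMICConfig: &ssh.GSSAPIWithMICConfig{
            Server: stubGSSServer{},
            // AllowLogin decides whether the authenticated principal may
            // log in, and with which permissions.
            AllowLogin: func(conn ssh.ConnMetadata, srcName string) (*ssh.Permissions, error) {
                if srcName == conn.User()+"@EXAMPLE.COM" { // placeholder realm
                    return &ssh.Permissions{}, nil
                }
                return nil, errors.New("gssapi: principal not authorized for this user")
            },
        },
    }
}

func main() { _ = newGSSConfig() }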
+type userAuthRequestGSSAPI struct { + N uint32 + OIDS []asn1.ObjectIdentifier +} + +func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { + n, rest, ok := parseUint32(payload) + if !ok { + return nil, errors.New("parse uint32 failed") + } + s := &userAuthRequestGSSAPI{ + N: n, + OIDS: make([]asn1.ObjectIdentifier, n), + } + for i := 0; i < int(n); i++ { + var ( + desiredMech []byte + err error + ) + desiredMech, rest, ok = parseString(rest) + if !ok { + return nil, errors.New("parse string failed") + } + if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { + return nil, err + } + + } + return s, nil +} + +// See RFC 4462 section 3.6. +func buildMIC(sessionID string, username string, service string, authMethod string) []byte { + out := make([]byte, 0, 0) + out = appendString(out, sessionID) + out = append(out, msgUserAuthRequest) + out = appendString(out, username) + out = appendString(out, service) + out = appendString(out, authMethod) + return out +} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go new file mode 100644 index 000000000..b171b330b --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -0,0 +1,116 @@ +package ssh + +import ( + "errors" + "io" + "net" +) + +// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "direct-streamlocal@openssh.com" string. +// +// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 +type streamLocalChannelOpenDirectMsg struct { + socketPath string + reserved0 string + reserved1 uint32 +} + +// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message +// with "forwarded-streamlocal@openssh.com" string. +type forwardedStreamLocalPayload struct { + SocketPath string + Reserved0 string +} + +// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message +// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. +type streamLocalChannelForwardMsg struct { + socketPath string +} + +// ListenUnix is similar to ListenTCP but uses a Unix domain socket. +func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + m := streamLocalChannelForwardMsg{ + socketPath, + } + // send message + ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") + } + ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) + + return &unixListener{socketPath, c, ch}, nil +} + +func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { + msg := streamLocalChannelOpenDirectMsg{ + socketPath: socketPath, + } + ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type unixListener struct { + socketPath string + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. 
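A sketch of the streamlocal forwarding path: the remote peer is asked to listen on a Unix socket and every connection arriving there is handled locally. The socket path and the echo handler are illustrative, it assumes an established *ssh.Client, and the server must permit streamlocal forwarding.

package main

import (
    "io"
    "log"

    "golang.org/x/crypto/ssh"
)

// serveRemoteSocket asks the server to listen on socketPath (via the
// streamlocal-forward@openssh.com request shown above) and echoes every
// connection that arrives.
func serveRemoteSocket(client *ssh.Client, socketPath string) error {
    ln, err := client.ListenUnix(socketPath)
    if err != nil {
        return err
    }
    defer ln.Close()

    for {
        conn, err := ln.Accept()
        if err != nil {
            return err // io.EOF once the forward is cancelled
        }
        go func() {
            defer conn.Close()
            if _, err := io.Copy(conn, conn); err != nil {
                log.Printf("echo: %v", err)
            }
        }()
    }
}

func main() {} // serveRemoteSocket would be called with a connected client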
+func (l *unixListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + }, nil +} + +// Close closes the listener. +func (l *unixListener) Close() error { + // this also closes the listener. + l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) + m := streamLocalChannelForwardMsg{ + l.socketPath, + } + ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *unixListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: l.socketPath, + Net: "unix", + } +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 000000000..80d35f5ec --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,474 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +// N must be "tcp", "tcp4", "tcp6", or "unix". +func (c *Client) Listen(n, addr string) (net.Listener, error) { + switch n { + case "tcp", "tcp4", "tcp6": + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) + case "unix": + return c.ListenUnix(addr) + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. 
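Reverse TCP forwarding in practice: the remote server listens, and each connection accepted there is bridged to a local TCP service. Addresses are placeholders and the function assumes an established *ssh.Client.

package main

import (
    "io"
    "log"
    "net"

    "golang.org/x/crypto/ssh"
)

// bridge forwards connections arriving on the remote listener to a local
// TCP service.
func bridge(client *ssh.Client, remoteAddr, localAddr string) error {
    ln, err := client.Listen("tcp", remoteAddr) // e.g. "0.0.0.0:8022" (placeholder)
    if err != nil {
        return err
    }
    defer ln.Close()

    for {
        remote, err := ln.Accept()
        if err != nil {
            return err
        }
        go func() {
            defer remote.Close()
            local, err := net.Dial("tcp", localAddr) // e.g. "127.0.0.1:22" (placeholder)
            if err != nil {
                log.Printf("local dial: %v", err)
                return
            }
            defer local.Close()
            go io.Copy(local, remote)
            io.Copy(remote, local)
        }()
    }
}

func main() {} // bridge would be called with a connected client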
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.Addr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr net.Addr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.Addr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + laddr: addr, + c: make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
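When the requested port is 0, the peer picks one and ListenTCP writes it back into laddr, so the allocated port can be read from the listener afterwards. A small sketch, assuming an established *ssh.Client and a server not affected by the broken OpenSSH versions discussed above; the *net.TCPAddr assertion relies on the listener returning the same address it was given.

package main

import (
    "fmt"
    "net"

    "golang.org/x/crypto/ssh"
)

// listenAnyPort asks the remote peer for a listener on an ephemeral port and
// reports which port was actually allocated.
func listenAnyPort(client *ssh.Client) (net.Listener, int, error) {
    ln, err := client.ListenTCP(&net.TCPAddr{IP: net.IPv4zero, Port: 0})
    if err != nil {
        return nil, 0, err
    }
    port := ln.Addr().(*net.TCPAddr).Port // filled in from the server's response
    fmt.Printf("remote side is listening on port %d\n", port)
    return ln, port, nil
}

func main() {} // listenAnyPort would be called with a connected client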
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var ( + laddr net.Addr + raddr net.Addr + err error + ) + switch channelType := ch.ChannelType(); channelType { + case "forwarded-tcpip": + var payload forwardedTCPPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err = parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + case "forwarded-streamlocal@openssh.com": + var payload forwardedStreamLocalPayload + if err = Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) + continue + } + laddr = &net.UnixAddr{ + Name: payload.SocketPath, + Net: "unix", + } + raddr = &net.UnixAddr{ + Name: "@", + Net: "unix", + } + default: + panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) + } + if ok := l.forward(laddr, raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.Addr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { + f.c <- forward{newCh: ch, raddr: raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &chanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + var ch Channel + switch n { + case "tcp", "tcp4", "tcp6": + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + return &chanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil + case "unix": + var err error + ch, err = c.dialStreamLocal(addr) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: &net.UnixAddr{ + Name: "@", + Net: "unix", + }, + raddr: &net.UnixAddr{ + Name: addr, + Net: "unix", + }, + }, nil + default: + return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) + } +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &chanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// chanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type chanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *chanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *chanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *chanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. 
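Dial opens a direct-tcpip (or, with "unix", a direct-streamlocal) channel to an address as seen from the remote server. The returned net.Conn has no working deadlines, so timeouts have to be applied at a higher level. A sketch with a placeholder target address, assuming an established *ssh.Client.

package main

import (
    "bufio"
    "fmt"
    "log"

    "golang.org/x/crypto/ssh"
)

// fetchThroughTunnel speaks plain HTTP/1.0 to a host that is only reachable
// from the SSH server's network.
func fetchThroughTunnel(client *ssh.Client, target string) error {
    conn, err := client.Dial("tcp", target) // e.g. "10.0.0.5:80" (placeholder)
    if err != nil {
        return err
    }
    defer conn.Close()

    // SetDeadline/SetReadDeadline/SetWriteDeadline return errors on this
    // connection type, so rely on contexts or timers elsewhere.
    fmt.Fprintf(conn, "GET / HTTP/1.0\r\nHost: %s\r\n\r\n", target)
    status, err := bufio.NewReader(conn).ReadString('\n')
    if err != nil {
        return err
    }
    log.Printf("response status line: %s", status)
    return nil
}

func main() {} // fetchThroughTunnel would be called with a connected client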
+func (t *chanConn) SetReadDeadline(deadline time.Time) error { + // for compatibility with previous version, + // the error message contains "tcpChan" + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. +func (t *chanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 000000000..49ddc2e7d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,353 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writeCipherPacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readCipherPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. 
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) + if err != nil { + return err + } + t.reader.pendingKeyChange <- ciph + + ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) + if err != nil { + return err + } + t.writer.pendingKeyChange <- ciph + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. +func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. 
+ fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + cipherMode := cipherModes[algs.Cipher] + macMode := macModes[algs.MAC] + + iv := make([]byte, cipherMode.ivSize) + key := make([]byte, cipherMode.keySize) + macKey := make([]byte, macMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + generateKeyMaterial(macKey, d.macKeyTag, kex) + + return cipherModes[algs.Cipher].create(key, iv, macKey, algs) +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. 
+ if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for length := 0; length < maxVersionStringBytes; length++ { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + if !bytes.HasPrefix(versionString, []byte("SSH-")) { + // RFC 4253 says we need to ignore all version string lines + // except the one containing the SSH version (provided that + // all the lines do not exceed 255 bytes in total). + versionString = versionString[:0] + continue + } + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. + if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..a3c021d3f --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
+// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 000000000..37dc0cfdb --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. 
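
To show how the Do and Get helpers above are meant to be combined with a context, a small sketch follows; the URL and the three-second budget are arbitrary placeholders. If the deadline expires before the response arrives, the request is abandoned and the context's own error is surfaced.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Get builds the request and delegates to Do, which attaches ctx to it.
	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		// When the deadline has passed, Do replaces the transport error with
		// ctx.Err(), so this is context.DeadlineExceeded in that case.
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("read %d bytes\n", len(body))
}
```

Because Do substitutes ctx.Err() when the context is done, callers can compare the returned error directly against context.DeadlineExceeded or context.Canceled instead of unwrapping a transport error.
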
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..d20f52b7d --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..d88bd1db1 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..0f35592df --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. 
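
A brief sketch of the behaviour that propagateCancel and cancelCtx above provide: a child context registered with its cancellable parent is cancelled as soon as the parent is, and reports the parent's error. The import path is this vendored package; on Go 1.7 and later the build-tagged files earlier in the diff make it a thin wrapper over the standard library's context, so the same code behaves identically with either.

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"
)

func main() {
	parent, cancelParent := context.WithCancel(context.Background())
	child, cancelChild := context.WithCancel(parent)
	defer cancelChild()

	// Cancelling the parent walks its registered children (the pre-Go 1.7
	// implementation above) or defers to the runtime's context package; in
	// both cases the child's Done channel is closed with the same error.
	cancelParent()

	<-child.Done()
	fmt.Println(child.Err()) // context canceled
}
```
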
The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
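
And the deadline path that timerCtx implements, sketched with an arbitrary 50 millisecond budget standing in for a real limit: once the timer fires, Done is closed and Err reports DeadlineExceeded.

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	// WithTimeout is simply WithDeadline(parent, time.Now().Add(timeout)).
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // stops the timer early if the work finishes first

	select {
	case <-time.After(time.Second): // placeholder for real work
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	}
}
```
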
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 000000000..b105f80be --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. 
+ // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 000000000..e6cd0ced3 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go new file mode 100644 index 000000000..e7de24ee6 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -0,0 +1,346 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
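
As an illustration of the trailer rules in guts.go above, with arbitrary example header names: anything carrying an "If-" prefix or listed in badTrailer is rejected, and the canonicalization step makes the check case-insensitive.

```go
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Canonicalization happens inside ValidTrailerHeader, so case is irrelevant.
	fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))        // true
	fmt.Println(httpguts.ValidTrailerHeader("content-length"))    // false: in badTrailer
	fmt.Println(httpguts.ValidTrailerHeader("If-Modified-Since")) // false: "If-" prefix
}
```
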
+ +package httpguts + +import ( + "net" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func IsTokenRune(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !IsTokenRune(r) +} + +// HeaderValuesContainsToken reports whether any string in values +// contains the provided token, ASCII case-insensitively. +func HeaderValuesContainsToken(values []string, token string) bool { + for _, v := range values { + if headerValueContainsToken(v, token) { + return true + } + } + return false +} + +// isOWS reports whether b is an optional whitespace byte, as defined +// by RFC 7230 section 3.2.3. +func isOWS(b byte) bool { return b == ' ' || b == '\t' } + +// trimOWS returns x with all optional whitespace removes from the +// beginning and end. +func trimOWS(x string) string { + // TODO: consider using strings.Trim(x, " \t") instead, + // if and when it's fast enough. See issue 10292. + // But this ASCII-only code will probably always beat UTF-8 + // aware code. + for len(x) > 0 && isOWS(x[0]) { + x = x[1:] + } + for len(x) > 0 && isOWS(x[len(x)-1]) { + x = x[:len(x)-1] + } + return x +} + +// headerValueContainsToken reports whether v (assumed to be a +// 0#element, in the ABNF extension described in RFC 7230 section 7) +// contains token amongst its comma-separated tokens, ASCII +// case-insensitively. +func headerValueContainsToken(v string, token string) bool { + v = trimOWS(v) + if comma := strings.IndexByte(v, ','); comma != -1 { + return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) + } + return tokenEqual(v, token) +} + +// lowerASCII returns the ASCII lowercase version of b. +func lowerASCII(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. +func tokenEqual(t1, t2 string) bool { + if len(t1) != len(t2) { + return false + } + for i, b := range t1 { + if b >= utf8.RuneSelf { + // No UTF-8 or non-ASCII allowed in tokens. 
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. +var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// 
field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. +func PunycodeHostPort(v string) (string, error) { + if isASCII(v) { + return v, nil + } + + host, port, err := net.SplitHostPort(v) + if err != nil { + // The input 'v' argument was just a "host" argument, + // without a port. This error should not be returned + // to the caller. + host = v + port = "" + } + host, err = idna.ToASCII(host) + if err != nil { + // Non-UTF-8? Not representable in Punycode, in any + // case. + return "", err + } + if port == "" { + return host, nil + } + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/golang.org/x/net/http2/.gitignore b/vendor/golang.org/x/net/http2/.gitignore new file mode 100644 index 000000000..190f12234 --- /dev/null +++ b/vendor/golang.org/x/net/http2/.gitignore @@ -0,0 +1,2 @@ +*~ +h2i/h2i diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile new file mode 100644 index 000000000..53fc52579 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Dockerfile @@ -0,0 +1,51 @@ +# +# This Dockerfile builds a recent curl with HTTP/2 client support, using +# a recent nghttp2 build. +# +# See the Makefile for how to tag it. If Docker and that image is found, the +# Go tests use this curl binary for integration tests. 
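
For reference, a short sketch of how the httplex.go validators and the Punycode helper above behave on a few made-up inputs:

```go
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	fmt.Println(httpguts.ValidHeaderFieldName("Content-Type")) // true
	fmt.Println(httpguts.ValidHeaderFieldName("Bad Header"))   // false: space is not a tchar
	fmt.Println(httpguts.ValidHeaderFieldValue("text/html"))   // true
	fmt.Println(httpguts.ValidHeaderFieldValue("a\x00b"))      // false: NUL is a CTL

	// Comma-separated token matching is ASCII case-insensitive.
	fmt.Println(httpguts.HeaderValuesContainsToken([]string{"gzip, Chunked"}, "chunked")) // true

	// Non-ASCII hosts are converted with IDNA; ASCII input passes through unchanged.
	host, err := httpguts.PunycodeHostPort("bücher.example:8080")
	fmt.Println(host, err) // xn--bcher-kva.example:8080 <nil>
}
```
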
+# + +FROM ubuntu:trusty + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y git-core build-essential wget + +RUN apt-get install -y --no-install-recommends \ + autotools-dev libtool pkg-config zlib1g-dev \ + libcunit1-dev libssl-dev libxml2-dev libevent-dev \ + automake autoconf + +# The list of packages nghttp2 recommends for h2load: +RUN apt-get install -y --no-install-recommends make binutils \ + autoconf automake autotools-dev \ + libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ + libev-dev libevent-dev libjansson-dev libjemalloc-dev \ + cython python3.4-dev python-setuptools + +# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: +ENV NGHTTP2_VER 895da9a +RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git + +WORKDIR /root/nghttp2 +RUN git reset --hard $NGHTTP2_VER +RUN autoreconf -i +RUN automake +RUN autoconf +RUN ./configure +RUN make +RUN make install + +WORKDIR /root +RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz +RUN tar -zxvf curl-7.45.0.tar.gz +WORKDIR /root/curl-7.45.0 +RUN ./configure --with-ssl --with-nghttp2=/usr/local +RUN make +RUN make install +RUN ldconfig + +CMD ["-h"] +ENTRYPOINT ["/usr/local/bin/curl"] + diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile new file mode 100644 index 000000000..55fd826f7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/Makefile @@ -0,0 +1,3 @@ +curlimage: + docker build -t gohttp2/curl . + diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README new file mode 100644 index 000000000..360d5aa37 --- /dev/null +++ b/vendor/golang.org/x/net/http2/README @@ -0,0 +1,20 @@ +This is a work-in-progress HTTP/2 implementation for Go. + +It will eventually live in the Go standard library and won't require +any changes to your code to use. It will just be automatic. + +Status: + +* The server support is pretty good. A few things are missing + but are being worked on. +* The client work has just started but shares a lot of code + is coming along much quicker. + +Docs are at https://godoc.org/golang.org/x/net/http2 + +Demo test server at https://http2.golang.org/ + +Help & bug reports welcome! + +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 000000000..c9a0cf3b4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. 
Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + 
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 
= 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 
0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + 
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 
0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 000000000..f4d9b5ece --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,282 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. 
share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + traceGetConn(req, addr) + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + traceGetConn(req, addr) + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. 
Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. 
+type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. 
+ if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). + want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 000000000..71f2c4631 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,133 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. 
+type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. +type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 000000000..cea601fcd --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. 
+func (f *flow) add(n int32) bool { + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true + } + return false +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 000000000..514c126c5 --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1614 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). 
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1<<i) == 0 { + continue + } + set++ + if set > 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<<i)] + if name != "" { + buf.WriteString(name) + } else { + fmt.Fprintf(buf, "0x%x", 1<<i) + } + } + } + if h.StreamID != 0 { + fmt.Fprintf(buf, " stream=%d", h.StreamID) + } + fmt.Fprintf(buf, " len=%d", h.Length) +} + +func (h *FrameHeader) checkValid() { + if !h.valid { + panic("Frame accessed after ReadFrame or Frame invalidated") + } +} + +func (h *FrameHeader) invalidate() { h.valid = false } + +// frame header bytes. +// Used only by ReadFrameHeader. +var fhBytes = sync.Pool{ + New: func() interface{} { + buf := make([]byte, frameHeaderLen) + return &buf + }, +} + +// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. +// Most users should use Framer.ReadFrame instead. +func ReadFrameHeader(r io.Reader) (FrameHeader, error) { + bufp := fhBytes.Get().(*[]byte) + defer fhBytes.Put(bufp) + return readFrameHeader(*bufp, r) +} + +func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) { + _, err := io.ReadFull(r, buf[:frameHeaderLen]) + if err != nil { + return FrameHeader{}, err + } + return FrameHeader{ + Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])), + Type: FrameType(buf[3]), + Flags: Flags(buf[4]), + StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1), + valid: true, + }, nil +} + +// A Frame is the base interface implemented by all frame types. +// Callers will generally type-assert the specific frame type: +// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc. +// +// Frames are only valid until the next call to Framer.ReadFrame. +type Frame interface { + Header() FrameHeader + + // invalidate is called by Framer.ReadFrame to make this + // frame's buffers as being invalid, since the subsequent + // frame will reuse them. + invalidate() +} + +// A Framer reads and writes Frames. +type Framer struct { + r io.Reader + lastFrame Frame + errDetail error + + // lastHeaderStream is non-zero if the last frame was an + // unfinished HEADERS/CONTINUATION. + lastHeaderStream uint32 + + maxReadSize uint32 + headerBuf [frameHeaderLen]byte + + // TODO: let getReadBuf be configurable, and use a less memory-pinning + // allocator in server.go to minimize memory pinned for many idle conns. + // Will probably also need to make frame invalidation have a hook too. + getReadBuf func(size uint32) []byte + readBuf []byte // cache for default getReadBuf + + maxWriteSize uint32 // zero means unlimited; TODO: implement + + w io.Writer + wbuf []byte + + // AllowIllegalWrites permits the Framer's Write methods to + // write frames that do not conform to the HTTP/2 spec. This + // permits using the Framer to test other HTTP/2 + // implementations' conformance to the spec. + // If false, the Write methods will prefer to return an error + // rather than comply. + AllowIllegalWrites bool + + // AllowIllegalReads permits the Framer's ReadFrame method + // to return non-compliant frames or frame orders. + // This is for testing and permits using the Framer to test + // other HTTP/2 implementations' conformance to the spec. + // It is not compatible with ReadMetaHeaders. + AllowIllegalReads bool + + // ReadMetaHeaders if non-nil causes ReadFrame to merge + // HEADERS and CONTINUATION frames together and return + // MetaHeadersFrame instead. + ReadMetaHeaders *hpack.Decoder + + // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE. + // It's used only if ReadMetaHeaders is set; 0 means a sane default + // (currently 16MB) + // If the limit is hit, MetaHeadersFrame.Truncated is set true. + MaxHeaderListSize uint32 + + // TODO: track which type of frame & with which flags was sent + // last. Then return an error (unless AllowIllegalWrites) if + // we're in the middle of a header block and a + // non-Continuation or Continuation on a different stream is + // attempted to be written. + + logReads, logWrites bool + + debugFramer *Framer // only use for logging written writes + debugFramerBuf *bytes.Buffer + debugReadLoggerf func(string, ...interface{}) + debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) +} + +func (fr *Framer) maxHeaderListSize() uint32 { + if fr.MaxHeaderListSize == 0 { + return 16 << 20 // sane default, per docs + } + return fr.MaxHeaderListSize +} + +func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { + // Write the FrameHeader. + f.wbuf = append(f.wbuf[:0], + 0, // 3 bytes of length, filled in in endWrite + 0, + 0, + byte(ftype), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) +} + +func (f *Framer) endWrite() error { + // Now that we know the final size, fill in the FrameHeader in + // the space previously reserved for it. Abuse append. + length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...)
} +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. +func terminalReadFrameError(err error) bool { + if _, ok := err.(StreamError); ok { + return false + } + return err != nil +} + +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. 
+func (fr *Framer) ReadFrame() (Frame, error) { + fr.errDetail = nil + if fr.lastFrame != nil { + fr.lastFrame.invalidate() + } + fh, err := readFrameHeader(fr.headerBuf[:], fr.r) + if err != nil { + return nil, err + } + if fh.Length > fr.maxReadSize { + return nil, ErrFrameTooLarge + } + payload := fr.getReadBuf(fh.Length) + if _, err := io.ReadFull(fr.r, payload); err != nil { + return nil, err + } + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) + if err != nil { + if ce, ok := err.(connError); ok { + return nil, fr.connError(ce.Code, ce.Reason) + } + return nil, err + } + if err := fr.checkFrameOrder(f); err != nil { + return nil, err + } + if fr.logReads { + fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) + } + if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { + return fr.readMetaFrame(f.(*HeadersFrame)) + } + return f, nil +} + +// connError returns ConnectionError(code) but first +// stashes away a public reason to the caller can optionally relay it +// to the peer before hanging up on them. This might help others debug +// their implementations. +func (fr *Framer) connError(code ErrCode, reason string) error { + fr.errDetail = errors.New(reason) + return ConnectionError(code) +} + +// checkFrameOrder reports an error if f is an invalid frame to return +// next from ReadFrame. Mostly it checks whether HEADERS and +// CONTINUATION frames are contiguous. +func (fr *Framer) checkFrameOrder(f Frame) error { + last := fr.lastFrame + fr.lastFrame = f + if fr.AllowIllegalReads { + return nil + } + + fh := f.Header() + if fr.lastHeaderStream != 0 { + if fh.Type != FrameContinuation { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", + fh.Type, fh.StreamID, + last.Header().Type, fr.lastHeaderStream)) + } + if fh.StreamID != fr.lastHeaderStream { + return fr.connError(ErrCodeProtocol, + fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", + fh.StreamID, fr.lastHeaderStream)) + } + } else if fh.Type == FrameContinuation { + return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) + } + + switch fh.Type { + case FrameHeaders, FrameContinuation: + if fh.Flags.Has(FlagHeadersEndHeaders) { + fr.lastHeaderStream = 0 + } else { + fr.lastHeaderStream = fh.StreamID + } + } + + return nil +} + +// A DataFrame conveys arbitrary, variable-length sequences of octets +// associated with a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.1 +type DataFrame struct { + FrameHeader + data []byte +} + +func (f *DataFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagDataEndStream) +} + +// Data returns the frame's data octets, not including any padding +// size byte or padding suffix bytes. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. 
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } + f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. +// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. 
The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true + } + } + return 0, false +} + +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { + return err + } + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. 
+// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. +// See http://http2.github.io/http2-spec/#rfc.section.6.9 +type WindowUpdateFrame struct { + FrameHeader + Increment uint32 // never read with high bit set +} + +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit + if inc == 0 { + // A receiver MUST treat the receipt of a + // WINDOW_UPDATE frame with an flow control window + // increment of 0 as a stream error (Section 5.4.2) of + // type PROTOCOL_ERROR; errors on the connection flow + // control window MUST be treated as a connection + // error (Section 5.4.1). 
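As the comment above notes, PING frames are HTTP/2's liveness and round-trip-time probe. A hedged sketch of how a caller might use WritePing and match the peer's ack by its opaque payload; pingOnce is a hypothetical helper, not an API of this package, and a real client would interleave this with its normal read loop:

```go
package sketch

import (
	"fmt"
	"time"

	"golang.org/x/net/http2"
)

// pingOnce writes a PING with an opaque payload and waits on the same Framer
// for the peer's ack carrying the identical 8 bytes back, returning the
// elapsed time as a rough round-trip estimate.
func pingOnce(fr *http2.Framer) (time.Duration, error) {
	payload := [8]byte{'l', 'i', 'v', 'e', 'n', 'e', 's', 's'}
	start := time.Now()
	if err := fr.WritePing(false, payload); err != nil {
		return 0, err
	}
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return 0, err
		}
		if pf, ok := f.(*http2.PingFrame); ok && pf.IsAck() && pf.Data == payload {
			return time.Since(start), nil
		}
		// Other frames would be handled by the normal read loop; this
		// sketch just skips them.
		fmt.Println("skipping", f.Header().Type)
	}
}
```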
+ if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + return &WindowUpdateFrame{ + FrameHeader: fh, + Increment: inc, + }, nil +} + +// WriteWindowUpdate writes a WINDOW_UPDATE frame. +// The increment value must be between 1 and 2,147,483,647, inclusive. +// If the Stream ID is zero, the window update applies to the +// connection as a whole. +func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { + // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." + if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { + return errors.New("illegal window increment value") + } + f.startWrite(FrameWindowUpdate, 0, streamID) + f.writeUint32(incr) + return f.endWrite() +} + +// A HeadersFrame is used to open a stream and additionally carries a +// header block fragment. +type HeadersFrame struct { + FrameHeader + + // Priority is set if FlagHeadersPriority is set in the FrameHeader. + Priority PriorityParam + + headerFragBuf []byte // not owned +} + +func (f *HeadersFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *HeadersFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) +} + +func (f *HeadersFrame) StreamEnded() bool { + return f.FrameHeader.Flags.Has(FlagHeadersEndStream) +} + +func (f *HeadersFrame) HasPriority() bool { + return f.FrameHeader.Flags.Has(FlagHeadersPriority) +} + +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + hf := &HeadersFrame{ + FrameHeader: fh, + } + if fh.StreamID == 0 { + // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // is received whose stream identifier field is 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} + } + var padLength uint8 + if fh.Flags.Has(FlagHeadersPadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + if fh.Flags.Has(FlagHeadersPriority) { + var v uint32 + p, v, err = readUint32(p) + if err != nil { + return nil, err + } + hf.Priority.StreamDep = v & 0x7fffffff + hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set + p, hf.Priority.Weight, err = readByte(p) + if err != nil { + return nil, err + } + } + if len(p)-int(padLength) <= 0 { + return nil, streamError(fh.StreamID, ErrCodeProtocol) + } + hf.headerFragBuf = p[:len(p)-int(padLength)] + return hf, nil +} + +// HeadersFrameParam are the parameters for writing a HEADERS frame. +type HeadersFrameParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndStream indicates that the header block is the last that + // the endpoint will send for the identified stream. Setting + // this flag causes the stream to enter one of "half closed" + // states. + EndStream bool + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 + + // Priority, if non-zero, includes stream priority information + // in the HEADER frame. + Priority PriorityParam +} + +// WriteHeaders writes a single HEADERS frame. 
+// +// This is a low-level header writing method. Encoding headers and +// splitting them into any necessary CONTINUATION frames is handled +// elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteHeaders(p HeadersFrameParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagHeadersPadded + } + if p.EndStream { + flags |= FlagHeadersEndStream + } + if p.EndHeaders { + flags |= FlagHeadersEndHeaders + } + if !p.Priority.IsZero() { + flags |= FlagHeadersPriority + } + f.startWrite(FrameHeaders, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !p.Priority.IsZero() { + v := p.Priority.StreamDep + if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { + return errDepStreamID + } + if p.Priority.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Priority.Weight) + } + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// A PriorityFrame specifies the sender-advised priority of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.3 +type PriorityFrame struct { + FrameHeader + PriorityParam +} + +// PriorityParam are the stream prioritzation parameters. +type PriorityParam struct { + // StreamDep is a 31-bit stream identifier for the + // stream that this stream depends on. Zero means no + // dependency. + StreamDep uint32 + + // Exclusive is whether the dependency is exclusive. + Exclusive bool + + // Weight is the stream's zero-indexed weight. It should be + // set together with StreamDep, or neither should be set. Per + // the spec, "Add one to the value to obtain a weight between + // 1 and 256." + Weight uint8 +} + +func (p PriorityParam) IsZero() bool { + return p == PriorityParam{} +} + +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} + } + if len(payload) != 5 { + return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} + } + v := binary.BigEndian.Uint32(payload[:4]) + streamID := v & 0x7fffffff // mask off high bit + return &PriorityFrame{ + FrameHeader: fh, + PriorityParam: PriorityParam{ + Weight: payload[4], + StreamDep: streamID, + Exclusive: streamID != v, // was high bit set? + }, + }, nil +} + +// WritePriority writes a PRIORITY frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. 
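WriteHeaders only frames an already HPACK-encoded block fragment, so callers pair it with the hpack encoder vendored later in this diff. A minimal sketch of that pairing (hypothetical standalone program; the stream ID and header values are arbitrary):

```go
package main

import (
	"bytes"
	"log"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode a small request header block with HPACK...
	var blockBuf bytes.Buffer
	enc := hpack.NewEncoder(&blockBuf)
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: ":path", Value: "/"},
		{Name: "user-agent", Value: "framer-sketch"},
	} {
		if err := enc.WriteField(hf); err != nil {
			log.Fatal(err)
		}
	}

	// ...and ship it in a single HEADERS frame. EndHeaders is set, so no
	// CONTINUATION frames follow.
	var wire bytes.Buffer
	fw := http2.NewFramer(&wire, nil)
	err := fw.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: blockBuf.Bytes(),
		EndStream:     true,
		EndHeaders:    true,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("HEADERS frame is %d bytes on the wire", wire.Len())
}
```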
+// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. If the stream identifier field specifies the value + // 0x0, a recipient MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + // The PUSH_PROMISE frame includes optional padding. 
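When a header block does not fit into one frame, the writer sends HEADERS with EndHeaders unset, followed by CONTINUATION frames, the last of which carries END_HEADERS. A sketch of that splitting; writeHeaderBlock is a hypothetical helper, since the vendored package does this internally in its client and server write paths:

```go
package sketch

import "golang.org/x/net/http2"

// writeHeaderBlock writes an already hpack-encoded header block for streamID,
// splitting it into one HEADERS frame plus as many CONTINUATION frames as
// needed so that no frame payload exceeds maxFrameSize.
func writeHeaderBlock(fw *http2.Framer, streamID uint32, block []byte, maxFrameSize int, endStream bool) error {
	first := block
	if len(first) > maxFrameSize {
		first = block[:maxFrameSize]
	}
	rest := block[len(first):]

	err := fw.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      streamID,
		BlockFragment: first,
		EndStream:     endStream,
		EndHeaders:    len(rest) == 0, // false means CONTINUATION frames follow
	})
	if err != nil {
		return err
	}
	for len(rest) > 0 {
		frag := rest
		if len(frag) > maxFrameSize {
			frag = rest[:maxFrameSize]
		}
		rest = rest[len(frag):]
		if err := fw.WriteContinuation(streamID, len(rest) == 0, frag); err != nil {
			return err
		}
	}
	return nil
}
```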
+ // Padding fields and flags are identical to those defined for DATA frames + var padLength uint8 + if fh.Flags.Has(FlagPushPromisePadded) { + if p, padLength, err = readByte(p); err != nil { + return + } + } + + p, pp.PromiseID, err = readUint32(p) + if err != nil { + return + } + pp.PromiseID = pp.PromiseID & (1<<31 - 1) + + if int(padLength) > len(p) { + // like the DATA frame, error out if padding is longer than the body. + return nil, ConnectionError(ErrCodeProtocol) + } + pp.headerFragBuf = p[:len(p)-int(padLength)] + return pp, nil +} + +// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. +type PushPromiseParam struct { + // StreamID is the required Stream ID to initiate. + StreamID uint32 + + // PromiseID is the required Stream ID which this + // Push Promises + PromiseID uint32 + + // BlockFragment is part (or all) of a Header Block. + BlockFragment []byte + + // EndHeaders indicates that this frame contains an entire + // header block and is not followed by any + // CONTINUATION frames. + EndHeaders bool + + // PadLength is the optional number of bytes of zeros to add + // to this frame. + PadLength uint8 +} + +// WritePushPromise writes a single PushPromise Frame. +// +// As with Header Frames, This is the low level call for writing +// individual frames. Continuation frames are handled elsewhere. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WritePushPromise(p PushPromiseParam) error { + if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if p.PadLength != 0 { + flags |= FlagPushPromisePadded + } + if p.EndHeaders { + flags |= FlagPushPromiseEndHeaders + } + f.startWrite(FramePushPromise, flags, p.StreamID) + if p.PadLength != 0 { + f.writeByte(p.PadLength) + } + if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { + return errStreamID + } + f.writeUint32(p.PromiseID) + f.wbuf = append(f.wbuf, p.BlockFragment...) + f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) + return f.endWrite() +} + +// WriteRawFrame writes a raw frame. This can be used to write +// extension frames unknown to this package. +func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { + f.startWrite(t, flags, streamID) + f.writeBytes(payload) + return f.endWrite() +} + +func readByte(p []byte) (remain []byte, b byte, err error) { + if len(p) == 0 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[1:], p[0], nil +} + +func readUint32(p []byte) (remain []byte, v uint32, err error) { + if len(p) < 4 { + return nil, 0, io.ErrUnexpectedEOF + } + return p[4:], binary.BigEndian.Uint32(p[:4]), nil +} + +type streamEnder interface { + StreamEnded() bool +} + +type headersEnder interface { + HeadersEnded() bool +} + +type headersOrContinuation interface { + headersEnder + HeaderBlockFragment() []byte +} + +// A MetaHeadersFrame is the representation of one HEADERS frame and +// zero or more contiguous CONTINUATION frames and the decoding of +// their HPACK-encoded contents. +// +// This type of frame does not appear on the wire and is only returned +// by the Framer when Framer.ReadMetaHeaders is set. +type MetaHeadersFrame struct { + *HeadersFrame + + // Fields are the fields contained in the HEADERS and + // CONTINUATION frames. The underlying slice is owned by the + // Framer and must not be retained after the next call to + // ReadFrame. 
+ // + // Fields are guaranteed to be in the correct http2 order and + // not have unknown pseudo header fields or invalid header + // field names or values. Required pseudo header fields may be + // missing, however. Use the MetaHeadersFrame.Pseudo accessor + // method access pseudo headers. + Fields []hpack.HeaderField + + // Truncated is whether the max header list size limit was hit + // and Fields is incomplete. The hpack decoder state is still + // valid, however. + Truncated bool +} + +// PseudoValue returns the given pseudo header field's value. +// The provided pseudo field should not contain the leading colon. +func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { + for _, hf := range mh.Fields { + if !hf.IsPseudo() { + return "" + } + if hf.Name[1:] == pseudo { + return hf.Value + } + } + return "" +} + +// RegularFields returns the regular (non-pseudo) header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[i:] + } + } + return nil +} + +// PseudoFields returns the pseudo header fields of mh. +// The caller does not own the returned slice. +func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { + for i, hf := range mh.Fields { + if !hf.IsPseudo() { + return mh.Fields[:i] + } + } + return mh.Fields +} + +func (mh *MetaHeadersFrame) checkPseudos() error { + var isRequest, isResponse bool + pf := mh.PseudoFields() + for i, hf := range pf { + switch hf.Name { + case ":method", ":path", ":scheme", ":authority": + isRequest = true + case ":status": + isResponse = true + default: + return pseudoHeaderError(hf.Name) + } + // Check for duplicates. + // This would be a bad algorithm, but N is 4. + // And this doesn't allocate. + for _, hf2 := range pf[:i] { + if hf.Name == hf2.Name { + return duplicatePseudoHeaderError(hf.Name) + } + } + } + if isRequest && isResponse { + return errMixPseudoHeaderTypes + } + return nil +} + +func (fr *Framer) maxHeaderStringLen() int { + v := fr.maxHeaderListSize() + if uint32(int(v)) == v { + return int(v) + } + // They had a crazy big number for MaxHeaderBytes anyway, + // so give them unlimited header lengths: + return 0 +} + +// readMetaFrame returns 0 or more CONTINUATION frames from fr and +// merge them into the provided hf and returns a MetaHeadersFrame +// with the decoded hpack values. 
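Setting Framer.ReadMetaHeaders is what turns raw HEADERS (plus CONTINUATION) frames into the MetaHeadersFrame described above. A self-contained sketch that encodes a request, reads it back as a MetaHeadersFrame, and pulls out pseudo and regular fields (header values are arbitrary; the 4096-byte table size matches the hpack default):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

func main() {
	// Produce a HEADERS frame carrying an hpack-encoded request.
	var wire, block bytes.Buffer
	enc := hpack.NewEncoder(&block)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":scheme", Value: "https"})
	enc.WriteField(hpack.HeaderField{Name: ":authority", Value: "example.com"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/index.html"})
	enc.WriteField(hpack.HeaderField{Name: "accept-encoding", Value: "gzip"})
	fw := http2.NewFramer(&wire, nil)
	fw.WriteHeaders(http2.HeadersFrameParam{
		StreamID:      1,
		BlockFragment: block.Bytes(),
		EndHeaders:    true,
		EndStream:     true,
	})

	// With ReadMetaHeaders set, ReadFrame returns a *MetaHeadersFrame with
	// the HPACK block already decoded and validated.
	fr := http2.NewFramer(nil, &wire)
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	f, err := fr.ReadFrame()
	if err != nil {
		log.Fatal(err)
	}
	mh := f.(*http2.MetaHeadersFrame)
	fmt.Println("method:", mh.PseudoValue("method"))
	fmt.Println("path:  ", mh.PseudoValue("path"))
	for _, hf := range mh.RegularFields() {
		fmt.Printf("regular: %s: %s\n", hf.Name, hf.Value)
	}
}
```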
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httpguts.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..3a131016b --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 000000000..9933c9f8c --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. + +package http2 + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" +) + +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" + +type goroutineLock uint64 + +func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } + return goroutineLock(curGoroutineID()) +} + +func (g goroutineLock) check() { + if !DebugGoroutines { + return + } + if curGoroutineID() != uint64(g) { + panic("running on the wrong goroutine") + } +} + +func (g goroutineLock) checkNotOn() { + if !DebugGoroutines { + return + } + if curGoroutineID() == uint64(g) { + panic("running on the wrong goroutine") + } +} + +var goroutineSpace = []byte("goroutine ") + +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := parseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} + +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// parseUintBytes is like strconv.ParseUint, but using a []byte. +func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. 
+	switch {
+	case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+		base = 16
+		s = s[2:]
+		if len(s) < 1 {
+			err = strconv.ErrSyntax
+			goto Error
+		}
+	case s[0] == '0':
+		base = 8
+	default:
+		base = 10
+	}
+
+	default:
+		err = errors.New("invalid base " + strconv.Itoa(base))
+		goto Error
+	}
+
+	n = 0
+	cutoff = cutoff64(base)
+	maxVal = 1<<uint(bitSize) - 1
+
+	for i := 0; i < len(s); i++ {
+		var v byte
+		d := s[i]
+		switch {
+		case '0' <= d && d <= '9':
+			v = d - '0'
+		case 'a' <= d && d <= 'z':
+			v = d - 'a' + 10
+		case 'A' <= d && d <= 'Z':
+			v = d - 'A' + 10
+		default:
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+		if int(v) >= base {
+			n = 0
+			err = strconv.ErrSyntax
+			goto Error
+		}
+
+		if n >= cutoff {
+			// n*base overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n *= uint64(base)
+
+		n1 := n + uint64(v)
+		if n1 < n || n1 > maxVal {
+			// n+v overflows
+			n = 1<<64 - 1
+			err = strconv.ErrRange
+			goto Error
+		}
+		n = n1
+	}
+
+	return n, nil
+
+Error:
+	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// Return the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+	if base < 2 {
+		return 0
+	}
+	return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 000000000..c3ff3fa1c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,88 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"net/http"
+	"strings"
+	"sync"
+)
+
+var (
+	commonBuildOnce   sync.Once
+	commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+	commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
+)
+
+func buildCommonHeaderMapsOnce() {
+	commonBuildOnce.Do(buildCommonHeaderMaps)
+}
+
+func buildCommonHeaderMaps() {
+	common := []string{
+		"accept",
+		"accept-charset",
+		"accept-encoding",
+		"accept-language",
+		"accept-ranges",
+		"age",
+		"access-control-allow-origin",
+		"allow",
+		"authorization",
+		"cache-control",
+		"content-disposition",
+		"content-encoding",
+		"content-language",
+		"content-length",
+		"content-location",
+		"content-range",
+		"content-type",
+		"cookie",
+		"date",
+		"etag",
+		"expect",
+		"expires",
+		"from",
+		"host",
+		"if-match",
+		"if-modified-since",
+		"if-none-match",
+		"if-unmodified-since",
+		"last-modified",
+		"link",
+		"location",
+		"max-forwards",
+		"proxy-authenticate",
+		"proxy-authorization",
+		"range",
+		"referer",
+		"refresh",
+		"retry-after",
+		"server",
+		"set-cookie",
+		"strict-transport-security",
+		"trailer",
+		"transfer-encoding",
+		"user-agent",
+		"vary",
+		"via",
+		"www-authenticate",
+	}
+	commonLowerHeader = make(map[string]string, len(common))
+	commonCanonHeader = make(map[string]string, len(common))
+	for _, v := range common {
+		chk := http.CanonicalHeaderKey(v)
+		commonLowerHeader[chk] = v
+		commonCanonHeader[v] = chk
+	}
+}
+
+func lowerHeader(v string) string {
+	buildCommonHeaderMapsOnce()
+	if s, ok := commonLowerHeader[v]; ok {
+		return s
+	}
+	return strings.ToLower(v)
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 000000000..97f17831f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,240 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package hpack + +import ( + "io" +) + +const ( + uint32Max = ^uint32(0) + initialHeaderTableSize = 4096 +) + +type Encoder struct { + dynTab dynamicTable + // minSize is the minimum table size set by + // SetMaxDynamicTableSize after the previous Header Table Size + // Update. + minSize uint32 + // maxSizeLimit is the maximum table size this encoder + // supports. This will protect the encoder from too large + // size. + maxSizeLimit uint32 + // tableSizeUpdate indicates whether "Header Table Size + // Update" is required. + tableSizeUpdate bool + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs HPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{ + minSize: uint32Max, + maxSizeLimit: initialHeaderTableSize, + tableSizeUpdate: false, + w: w, + } + e.dynTab.table.init() + e.dynTab.setMaxSize(initialHeaderTableSize) + return e +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for "Header Table Size Update" +// if necessary. If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + e.buf = e.buf[:0] + + if e.tableSizeUpdate { + e.tableSizeUpdate = false + if e.minSize < e.dynTab.maxSize { + e.buf = appendTableSize(e.buf, e.minSize) + } + e.minSize = uint32Max + e.buf = appendTableSize(e.buf, e.dynTab.maxSize) + } + + idx, nameValueMatch := e.searchTable(f) + if nameValueMatch { + e.buf = appendIndexed(e.buf, idx) + } else { + indexing := e.shouldIndex(f) + if indexing { + e.dynTab.add(f) + } + + if idx == 0 { + e.buf = appendNewName(e.buf, f, indexing) + } else { + e.buf = appendIndexedName(e.buf, f, idx, indexing) + } + } + n, err := e.w.Write(e.buf) + if err == nil && n != len(e.buf) { + err = io.ErrShortWrite + } + return err +} + +// searchTable searches f in both stable and dynamic header tables. +// The static header table is searched first. Only when there is no +// exact match for both name and value, the dynamic header table is +// then searched. If there is no match, i is 0. If both name and value +// match, i is the matched index and nameValueMatch becomes true. If +// only name matches, i points to that index and nameValueMatch +// becomes false. +func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true + } + + j, nameValueMatch := e.dynTab.table.search(f) + if nameValueMatch || (i == 0 && j != 0) { + return j + uint64(staticTable.len()), nameValueMatch + } + + return i, false +} + +// SetMaxDynamicTableSize changes the dynamic header table size to v. +// The actual size is bounded by the value passed to +// SetMaxDynamicTableSizeLimit. +func (e *Encoder) SetMaxDynamicTableSize(v uint32) { + if v > e.maxSizeLimit { + v = e.maxSizeLimit + } + if v < e.minSize { + e.minSize = v + } + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) +} + +// SetMaxDynamicTableSizeLimit changes the maximum value that can be +// specified in SetMaxDynamicTableSize to v. By default, it is set to +// 4096, which is the same size of the default dynamic header table +// size described in HPACK specification. If the current maximum +// dynamic header table size is strictly greater than v, "Header Table +// Size Update" will be done in the next WriteField call and the +// maximum dynamic header table size is truncated to v. 
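WriteField chooses between indexed and literal representations and maintains the dynamic table as described above. A small sketch showing the practical effect: a static-table hit costs one byte, a repeated custom field costs one byte the second time, and a Sensitive field is sent as a never-indexed literal. Field names and values here are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// ":method: GET" is in the HPACK static table, so this is a single
	// indexed byte on the wire.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	fmt.Println("after static hit:", buf.Len(), "byte(s)")

	// The first occurrence of a custom field is a literal that also enters
	// the dynamic table...
	enc.WriteField(hpack.HeaderField{Name: "x-trace-id", Value: "abc123"})
	n := buf.Len()

	// ...so repeating it costs only an index into the dynamic table.
	enc.WriteField(hpack.HeaderField{Name: "x-trace-id", Value: "abc123"})
	fmt.Println("repeat cost:", buf.Len()-n, "byte(s)")

	// Sensitive fields are emitted as never-indexed literals and are not
	// added to the dynamic table.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
}
```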
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { + e.maxSizeLimit = v + if e.dynTab.maxSize > v { + e.tableSizeUpdate = true + e.dynTab.setMaxSize(v) + } +} + +// shouldIndex reports whether f should be indexed. +func (e *Encoder) shouldIndex(f HeaderField) bool { + return !f.Sensitive && f.Size() <= e.dynTab.maxSize +} + +// appendIndexed appends index i, as encoded in "Indexed Header Field" +// representation, to dst and returns the extended buffer. +func appendIndexed(dst []byte, i uint64) []byte { + first := len(dst) + dst = appendVarInt(dst, 7, i) + dst[first] |= 0x80 + return dst +} + +// appendNewName appends f, as encoded in one of "Literal Header field +// - New Name" representation variants, to dst and returns the +// extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { + dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) + dst = appendHpackString(dst, f.Name) + return appendHpackString(dst, f.Value) +} + +// appendIndexedName appends f and index i referring indexed name +// entry, as encoded in one of "Literal Header field - Indexed Name" +// representation variants, to dst and returns the extended buffer. +// +// If f.Sensitive is true, "Never Indexed" representation is used. If +// f.Sensitive is false and indexing is true, "Incremental Indexing" +// representation is used. +func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { + first := len(dst) + var n byte + if indexing { + n = 6 + } else { + n = 4 + } + dst = appendVarInt(dst, n, i) + dst[first] |= encodeTypeByte(indexing, f.Sensitive) + return appendHpackString(dst, f.Value) +} + +// appendTableSize appends v, as encoded in "Header Table Size Update" +// representation, to dst and returns the extended buffer. +func appendTableSize(dst []byte, v uint32) []byte { + first := len(dst) + dst = appendVarInt(dst, 5, uint64(v)) + dst[first] |= 0x20 + return dst +} + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// appendHpackString appends s, as encoded in "String Literal" +// representation, to dst and returns the extended buffer. +// +// s will be encoded in Huffman codes only when it produces strictly +// shorter byte string. +func appendHpackString(dst []byte, s string) []byte { + huffmanLength := HuffmanEncodeLength(s) + if huffmanLength < uint64(len(s)) { + first := len(dst) + dst = appendVarInt(dst, 7, huffmanLength) + dst = AppendHuffmanString(dst, s) + dst[first] |= 0x80 + } else { + dst = appendVarInt(dst, 7, uint64(len(s))) + dst = append(dst, s...) + } + return dst +} + +// encodeTypeByte returns type byte. If sensitive is true, type byte +// for "Never Indexed" representation is returned. If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. 
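appendVarInt above implements the prefix-integer encoding of RFC 7541 section 5.1. A standalone restatement of the rule plus the worked example from RFC 7541 Appendix C.1.2; encodePrefixInt is a hypothetical mirror of the unexported helper, shown only to make the arithmetic concrete:

```go
package main

import "fmt"

// encodePrefixInt encodes i using an n-bit prefix, mirroring appendVarInt:
// values below 2^n-1 fit in the prefix, larger values emit the prefix of all
// ones followed by 7-bit continuation bytes.
func encodePrefixInt(dst []byte, n byte, i uint64) []byte {
	k := uint64((1 << n) - 1)
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for ; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}

func main() {
	// RFC 7541 Appendix C.1.2: 1337 with a 5-bit prefix encodes to 1f 9a 0a.
	fmt.Printf("% x\n", encodePrefixInt(nil, 5, 1337))
}
```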
+func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 000000000..85f18a2b0 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,504 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer + + firstField bool // processing the first field of the header block +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + firstField: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. 
+ if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + d.firstField = true + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + d.firstField = false + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
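DecodeFull above is the convenience entry point for decoding a complete header block in one call; Write plus an emit callback is the incremental path the Framer's readMetaFrame uses. A round-trip sketch using the encoder from encode.go (the 4096-byte table size matches the package default; field values are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode two fields...
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "cache-control", Value: "no-store"})

	// ...and decode the whole block in one call.
	dec := hpack.NewDecoder(4096, nil)
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, hf := range fields {
		fmt.Println(hf.Name, "=", hf.Value)
	}
}
```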
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 + return d.parseFieldIndexed() + case b&192 == 64: + // 6.2.1 Literal Header Field with Incremental Indexing + // 0b10xxxxxx: top two bits are 10 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 + return d.parseFieldLiteral(6, indexedTrue) + case b&240 == 0: + // 6.2.2 Literal Header Field without Indexing + // 0b0000xxxx: top four bits are 0000 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 + return d.parseFieldLiteral(4, indexedFalse) + case b&240 == 16: + // 6.2.3 Literal Header Field never Indexed + // 0b0001xxxx: top four bits are 0001 + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 + return d.parseFieldLiteral(4, indexedNever) + case b&224 == 32: + // 6.3 Dynamic Table Size Update + // Top three bits are '001'. + // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 + return d.parseDynamicTableSizeUpdate() + } + + return DecodingError{errors.New("invalid encoding")} +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldIndexed() error { + buf := d.buf + idx, buf, err := readVarInt(7, buf) + if err != nil { + return err + } + hf, ok := d.at(idx) + if !ok { + return DecodingError{InvalidIndexError(idx)} + } + d.buf = buf + return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { + buf := d.buf + nameIdx, buf, err := readVarInt(n, buf) + if err != nil { + return err + } + + var hf HeaderField + wantStr := d.emitEnabled || it.indexed() + if nameIdx > 0 { + ihf, ok := d.at(nameIdx) + if !ok { + return DecodingError{InvalidIndexError(nameIdx)} + } + hf.Name = ihf.Name + } else { + hf.Name, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + } + hf.Value, buf, err = d.readString(buf, wantStr) + if err != nil { + return err + } + d.buf = buf + if it.indexed() { + d.dynTab.add(hf) + } + hf.Sensitive = it.sensitive() + return d.callEmit(hf) +} + +func (d *Decoder) callEmit(hf HeaderField) error { + if d.maxStrLen != 0 { + if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { + return ErrStringLength + } + } + if d.emitEnabled { + d.emit(hf) + } + return nil +} + +// (same invariants and behavior as parseHeaderFieldRepr) +func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. + if !d.firstField && d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + + buf := d.buf + size, buf, err := readVarInt(5, buf) + if err != nil { + return err + } + if size > uint64(d.dynTab.allowedMaxSize) { + return DecodingError{errors.New("dynamic table size update too large")} + } + d.dynTab.setMaxSize(uint32(size)) + d.buf = buf + return nil +} + +var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. 
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+	if n < 1 || n > 8 {
+		panic("bad n")
+	}
+	if len(p) == 0 {
+		return 0, p, errNeedMore
+	}
+	i = uint64(p[0])
+	if n < 8 {
+		i &= (1 << uint64(n)) - 1
+	}
+	if i < (1<<uint64(n))-1 {
+		return i, p[1:], nil
+	}
+
+	origP := p
+	p = p[1:]
+	var m uint64
+	for len(p) > 0 {
+		b := p[0]
+		p = p[1:]
+		i += uint64(b&127) << m
+		if b&128 == 0 {
+			return i, p, nil
+		}
+		m += 7
+		if m >= 63 { // TODO: proper overflow check. making this up.
+			return 0, origP, errVarintOverflow
+		}
+	}
+	return 0, origP, errNeedMore
+}
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped if s will be ignored
+// anyway. This does mean that huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+	if len(p) == 0 {
+		return "", p, errNeedMore
+	}
+	isHuff := p[0]&128 != 0
+	strLen, p, err := readVarInt(7, p)
+	if err != nil {
+		return "", p, err
+	}
+	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+		return "", nil, ErrStringLength
+	}
+	if uint64(len(p)) < strLen {
+		return "", p, errNeedMore
+	}
+	if !isHuff {
+		if wantStr {
+			s = string(p[:strLen])
+		}
+		return s, p[strLen:], nil
+	}
+
+	if wantStr {
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset() // don't trust others
+		defer bufPool.Put(buf)
+		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+			buf.Reset()
+			return "", nil, err
+		}
+		s = buf.String()
+		buf.Reset() // be nice to GC
+	}
+	return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 000000000..b412a96c5
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,222 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return 0, err
+	}
+	return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	defer bufPool.Put(buf)
+	if err := huffmanDecode(buf, 0, v); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+	rootHuffmanNode := getRootHuffmanNode()
+	n := rootHuffmanNode
+	// cur is the bit buffer that has not been fed into n.
+	// cbits is the number of low order bits in cur that are valid.
+	// sbits is the number of bits of the symbol prefix being decoded.
+	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+	for _, b := range v {
+		cur = cur<<8 | uint(b)
+		cbits += 8
+		sbits += 8
+		for cbits >= 8 {
+			idx := byte(cur >> (cbits - 8))
+			n = n.children[idx]
+			if n == nil {
+				return ErrInvalidHuffman
+			}
+			if n.children == nil {
+				if maxLen != 0 && buf.Len() == maxLen {
+					return ErrStringLength
+				}
+				buf.WriteByte(n.sym)
+				cbits -= n.codeLen
+				n = rootHuffmanNode
+				sbits = cbits
+			} else {
+				cbits -= 8
+			}
+		}
+	}
+	for cbits > 0 {
+		n = n.children[byte(cur<<(8-cbits))]
+		if n == nil {
+			return ErrInvalidHuffman
+		}
+		if n.children != nil || n.codeLen > cbits {
+			break
+		}
+		if maxLen != 0 && buf.Len() == maxLen {
+			return ErrStringLength
+		}
+		buf.WriteByte(n.sym)
+		cbits -= n.codeLen
+		n = rootHuffmanNode
+		sbits = cbits
+	}
+	if sbits > 7 {
+		// Either there was an incomplete symbol, or overlong padding.
+		// Both are decoding errors per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+	if mask := uint(1<<cbits - 1); cur&mask != mask {
+		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+		return ErrInvalidHuffman
+	}
+
+	return nil
+}
+
+type node struct {
+	// children is non-nil for internal nodes
+	children *[256]*node
+
+	// The following are only valid if children is nil:
+	codeLen uint8 // number of bits that led to the output of sym
+	sym     byte  // output symbol
+}
+
+func newInternalNode() *node {
+	return &node{children: new([256]*node)}
+}
+
+var (
+	buildRootOnce       sync.Once
+	lazyRootHuffmanNode *node
+)
+
+func getRootHuffmanNode() *node {
+	buildRootOnce.Do(buildRootHuffmanNode)
+	return lazyRootHuffmanNode
+}
+
+func buildRootHuffmanNode() {
+	if len(huffmanCodes) != 256 {
+		panic("unexpected size")
+	}
+	lazyRootHuffmanNode = newInternalNode()
+	for i, code := range huffmanCodes {
+		addDecoderNode(byte(i), code, huffmanCodeLen[i])
+	}
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+	cur := lazyRootHuffmanNode
+	for codeLen > 8 {
+		codeLen -= 8
+		i := uint8(code >> codeLen)
+		if cur.children[i] == nil {
+			cur.children[i] = newInternalNode()
+		}
+		cur = cur.children[i]
+	}
+	shift := 8 - codeLen
+	start, end := int(uint8(code<<shift)), int(1<<shift)
+	for i := start; i < start+end; i++ {
+		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+	}
+}
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+	rembits := uint8(8)
+
+	for i := 0; i < len(s); i++ {
+		if rembits == 8 {
+			dst = append(dst, 0)
+		}
+		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+	}
+
+	if rembits < 8 {
+		// special EOS symbol
+		code := uint32(0x3fffffff)
+		nbits := uint8(30)
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+	}
+
+	return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is round up to byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+	n := uint64(0)
+	for i := 0; i < len(s); i++ {
+		n += uint64(huffmanCodeLen[s[i]])
+	}
+	return (n + 7) / 8
+}
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte aligned and the remaining bits
+// in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+	code := huffmanCodes[c]
+	nbits := huffmanCodeLen[c]
+
+	for {
+		if rembits > nbits {
+			t := uint8(code << (rembits - nbits))
+			dst[len(dst)-1] |= t
+			rembits -= nbits
+			break
+		}
+
+		t := uint8(code >> (nbits - rembits))
+		dst[len(dst)-1] |= t
+
+		nbits -= rembits
+		rembits = 8
+
+		if nbits == 0 {
+			break
+		}
+
+		dst = append(dst, 0)
+	}
+
+	return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 000000000..a66cfbea6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,479 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+	// For static tables, entries are never evicted.
+	//
+	// For dynamic tables, entries are evicted from ents[0] and added to the end.
+	// Each entry has a unique id that starts at one and increments for each
+	// entry that is added. This unique id is stable across evictions, meaning
+	// it can be used as a pointer to a specific entry. As in hpack, unique ids
+	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
+	//
+	// Zero is not a valid unique id.
+	//
+	// evictCount should not overflow in any remotely practical situation.
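The exported Huffman helpers in huffman.go above round-trip cleanly, and HuffmanEncodeLength predicts the encoded size exactly. A short sketch using the value from RFC 7541 Appendix C.4.1, which Huffman-encodes "www.example.com" into 12 octets:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"

	// RFC 7541 Appendix C.4.1 encodes this value as f1e3 c2e5 f23a 6ba0
	// ab90 f4ff, i.e. 12 bytes; HuffmanEncodeLength predicts that count.
	fmt.Println("predicted:", hpack.HuffmanEncodeLength(s))

	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Printf("encoded:   %d bytes (% x)\n", len(enc), enc)

	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("decoded:  ", dec)
}
```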
In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 
0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 000000000..bdaba1d46 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,384 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. 
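// Illustrative sketch, not part of the vendored diff: a round trip through the
// exported Huffman helpers in hpack/huffman.go above, which are driven by the
// huffmanCodes/huffmanCodeLen tables just shown. Per RFC 7541 Appendix C,
// "www.example.com" Huffman-encodes to 12 bytes.
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com"
	enc := hpack.AppendHuffmanString(nil, s)
	fmt.Println(len(enc), hpack.HuffmanEncodeLength(s)) // 12 12

	dec, err := hpack.HuffmanDecodeToString(enc)
	fmt.Println(dec, err) // www.example.com <nil>
}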
+// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. +const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. 
+func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httpguts.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httpguts.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +func httpCodeString(code int) string { + switch code { + case 200: + return "200" + case 404: + return "404" + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. +// It exists because so a closeWaiter value can be placed inside a +// larger struct and have the Mutex and Cond's memory in the same +// allocation. +func (cw *closeWaiter) Init() { + *cw = make(chan struct{}) +} + +// Close marks the closeWaiter as closed and unblocks any waiters. +func (cw closeWaiter) Close() { + close(cw) +} + +// Wait waits for the closeWaiter to become closed. +func (cw closeWaiter) Wait() { + <-cw +} + +// bufferedWriter is a buffered writer that writes to w. +// Its buffered writer is lazily allocated as needed, to minimize +// idle memory usage with many connections. 
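// Illustrative sketch, not part of the vendored diff: exercising the exported
// Setting API defined above. Valid applies the bounds from RFC 7540 section
// 6.5.2, so a SETTINGS_MAX_FRAME_SIZE below 16384 yields a non-nil connection
// error while an in-range value passes.
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ok := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 20}
	bad := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 100}
	fmt.Println(ok, ok.Valid())   // [MAX_FRAME_SIZE = 1048576] <nil>
	fmt.Println(bad, bad.Valid()) // prints a PROTOCOL_ERROR connection error
}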
+type bufferedWriter struct { + w io.Writer // immutable + bw *bufio.Writer // non-nil when data is buffered +} + +func newBufferedWriter(w io.Writer) *bufferedWriter { + return &bufferedWriter{w: w} +} + +// bufWriterPoolBufferSize is the size of bufio.Writer's +// buffers created using bufWriterPool. +// +// TODO: pick a less arbitrary value? this is a bit under +// (3 x typical 1500 byte MTU) at least. Other than that, +// not much thought went into it. +const bufWriterPoolBufferSize = 4 << 10 + +var bufWriterPool = sync.Pool{ + New: func() interface{} { + return bufio.NewWriterSize(nil, bufWriterPoolBufferSize) + }, +} + +func (w *bufferedWriter) Available() int { + if w.bw == nil { + return bufWriterPoolBufferSize + } + return w.bw.Available() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + if w.bw == nil { + bw := bufWriterPool.Get().(*bufio.Writer) + bw.Reset(w.w) + w.bw = bw + } + return w.bw.Write(p) +} + +func (w *bufferedWriter) Flush() error { + bw := w.bw + if bw == nil { + return nil + } + err := bw.Flush() + bw.Reset(nil) + bufWriterPool.Put(bw) + w.bw = nil + return err +} + +func mustUint31(v int32) uint32 { + if v < 0 || v > 2147483647 { + panic("out of range") + } + return uint32(v) +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 7230, section 3.3. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +type httpError struct { + msg string + timeout bool +} + +func (e *httpError) Error() string { return e.msg } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} + +type connectionStater interface { + ConnectionState() tls.ConnectionState +} + +var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} + +type sorter struct { + v []string // owned by sorter +} + +func (s *sorter) Len() int { return len(s.v) } +func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } +func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } + +// Keys returns the sorted keys of h. +// +// The returned slice is only valid until s used again or returned to +// its pool. +func (s *sorter) Keys(h http.Header) []string { + keys := s.v[:0] + for k := range h { + keys = append(keys, k) + } + s.v = keys + sort.Sort(s) + return keys +} + +func (s *sorter) SortStrings(ss []string) { + // Our sorter works on s.v, which sorter owns, so + // stash it away while we sort the user's buffer. + save := s.v + s.v = ss + sort.Sort(s) + s.v = save +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..161bca7ce --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go new file mode 100644 index 000000000..a6140099c --- /dev/null +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -0,0 +1,163 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "io" + "sync" +) + +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// io.Pipe except there are no PipeReader/PipeWriter halves, and the +// underlying buffer is an interface. (io.Pipe is always unbuffered) +type pipe struct { + mu sync.Mutex + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading + err error // read error once empty. non-nil means closed. + breakErr error // immediate read error (caller doesn't see rest of b) + donec chan struct{} // closed on error + readFn func() // optional code to run in Read before error +} + +type pipeBuffer interface { + Len() int + io.Writer + io.Reader +} + +func (p *pipe) Len() int { + p.mu.Lock() + defer p.mu.Unlock() + if p.b == nil { + return 0 + } + return p.b.Len() +} + +// Read waits until data is available and copies bytes +// from the buffer into p. +func (p *pipe) Read(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + for { + if p.breakErr != nil { + return 0, p.breakErr + } + if p.b != nil && p.b.Len() > 0 { + return p.b.Read(d) + } + if p.err != nil { + if p.readFn != nil { + p.readFn() // e.g. copy trailers + p.readFn = nil // not sticky like p.err + } + p.b = nil + return 0, p.err + } + p.c.Wait() + } +} + +var errClosedPipeWrite = errors.New("write on closed buffer") + +// Write copies bytes from p into the buffer and wakes a reader. +// It is an error to write more data than the buffer can hold. +func (p *pipe) Write(d []byte) (n int, err error) { + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if p.err != nil { + return 0, errClosedPipeWrite + } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } + return p.b.Write(d) +} + +// CloseWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err after all data has been +// read. +// +// The error must be non-nil. +func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } + +// BreakWithError causes the next Read (waking up a current blocked +// Read if needed) to return the provided err immediately, without +// waiting for unread data. 
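// Illustrative sketch, not part of the vendored diff: how the unexported pipe
// type above is used from inside the package (for example in a test). It
// assumes the enclosing file imports "bytes", "fmt", and "io". A bytes.Buffer
// satisfies pipeBuffer, Write wakes a blocked Read, and the error given to
// CloseWithError is delivered to the reader only after the buffered data has
// been drained.
func examplePipe() {
	p := &pipe{b: new(bytes.Buffer)}
	go func() {
		p.Write([]byte("hello"))
		p.CloseWithError(io.EOF)
	}()

	buf := make([]byte, 16)
	n, err := p.Read(buf) // blocks until the Write above, then returns the data
	fmt.Printf("%q %v\n", buf[:n], err)

	_, err = p.Read(buf) // buffer drained: the stored error is returned
	fmt.Println(err)     // EOF
}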
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } + +// closeWithErrorAndCode is like CloseWithError but also sets some code to run +// in the caller's goroutine before returning the error. +func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } + +func (p *pipe) closeWithError(dst *error, err error, fn func()) { + if err == nil { + panic("err must be non-nil") + } + p.mu.Lock() + defer p.mu.Unlock() + if p.c.L == nil { + p.c.L = &p.mu + } + defer p.c.Signal() + if *dst != nil { + // Already been done. + return + } + p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } + *dst = err + p.closeDoneLocked() +} + +// requires p.mu be held. +func (p *pipe) closeDoneLocked() { + if p.donec == nil { + return + } + // Close if unclosed. This isn't racy since we always + // hold p.mu while closing. + select { + case <-p.donec: + default: + close(p.donec) + } +} + +// Err returns the error (if any) first set by BreakWithError or CloseWithError. +func (p *pipe) Err() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.breakErr != nil { + return p.breakErr + } + return p.err +} + +// Done returns a channel which is closed if and when this pipe is closed +// with CloseWithError. +func (p *pipe) Done() <-chan struct{} { + p.mu.Lock() + defer p.mu.Unlock() + if p.donec == nil { + p.donec = make(chan struct{}) + if p.err != nil || p.breakErr != nil { + // Already hit an error. + p.closeDoneLocked() + } + } + return p.donec +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go new file mode 100644 index 000000000..d2ba820c7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/server.go @@ -0,0 +1,2965 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO: turn off the serve goroutine when idle, so +// an idle conn only has the readFrames goroutine active. (which could +// also be optimized probably to pin less memory in crypto/tls). This +// would involve tracking when the serve goroutine is active (atomic +// int32 read/CAS probably?) and starting it up when frames arrive, +// and shutting it down when all handlers exit. the occasional PING +// packets could use time.AfterFunc to call sc.wakeStartServeLoop() +// (which is a no-op if already running) and then queue the PING write +// as normal. The serve loop would then exit in most cases (if no +// Handlers running) and not be woken up again until the PING packet +// returns. + +// TODO (maybe): add a mechanism for Handlers to going into +// half-closed-local mode (rw.(io.Closer) test?) but not exit their +// handler, and continue to be able to read from the +// Request.Body. This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. 
+ +package http2 + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen. + NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. 
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +// maxQueuedControlFrames is the maximum number of control frames like +// SETTINGS, PING and RST_STREAM that will be queued for writing before +// the connection is closed to prevent memory exhaustion attacks. +func (s *Server) maxQueuedControlFrames() int { + // TODO: if anybody asks, add a Server field, and remember to define the + // behavior of negative values. + return maxQueuedControlFrames +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } + conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) context() context.Context { + if o != nil && o.Context != nil { + return o.Context + } + return context.Background() +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. 
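// Illustrative sketch, not part of the vendored diff: wiring this package into
// a standard net/http server via the exported ConfigureServer shown above. The
// address, timeout, and certificate paths are placeholder values, not
// recommendations.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	err := http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 250,
		IdleTimeout:          30 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	// ConfigureServer has appended "h2" to srv.TLSConfig.NextProtos, so the TLS
	// listener negotiates HTTP/2 with clients that support it.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}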
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. 
+ } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(opts.context()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx context.Context + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ queuedControlFrames int // control frames in the writeSched queue + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field. 
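// Illustrative sketch, not part of the vendored diff: the padding performed by
// maxHeaderListSize above, restated as a standalone program. The 32-byte
// per-field overhead matches RFC 7540's accounting for SETTINGS_MAX_HEADER_LIST_SIZE,
// and the assumption of ten typical header fields mirrors the comment above.
package main

import (
	"fmt"
	"net/http"
)

func advertisedMaxHeaderListSize(maxHeaderBytes int) uint32 {
	if maxHeaderBytes <= 0 {
		maxHeaderBytes = http.DefaultMaxHeaderBytes // 1 << 20
	}
	const perFieldOverhead = 32
	const typicalHeaders = 10
	return uint32(maxHeaderBytes + typicalHeaders*perFieldOverhead)
}

func main() {
	fmt.Println(advertisedMaxHeaderListSize(0)) // 1048896 for the net/http default of 1 MB
}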
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed wait stream transitions to closed state + ctx context.Context + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types. 
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + buildCommonHeaderMapsOnce() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // If the peer is causing us to generate a lot of control frames, + // but not reading them from us, assume they are trying to make us + // run out of memory. + if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + sc.vlogf("http2: too many control frames in send queue, closing connection") + return + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. 
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
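// Illustrative sketch, not part of the vendored diff: the client half of the
// handshake that readPreface above waits for. sendClientPreface is a
// hypothetical helper; it assumes imports "io" and "net" plus this package,
// and uses only the exported ClientPreface constant and Framer API.
func sendClientPreface(conn net.Conn) error {
	// The connection preface is the fixed ClientPreface bytes followed by a
	// SETTINGS frame (an empty one is valid).
	if _, err := io.WriteString(conn, http2.ClientPreface); err != nil {
		return err
	}
	fr := http2.NewFramer(conn, conn) // writes to conn, reads from conn
	return fr.WriteSettings()
}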
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + if wr.isControl() { + sc.queuedControlFrames++ + // For extra safety, detect wraparounds, which should not happen, + // and pull the plug. + if sc.queuedControlFrames < 0 { + sc.conn.Close() + } + } + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. 
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. (We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. 
+// +// If a frame isn't being written and we need to send one, the best frame +// to send is selected by writeSched. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. +func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + if wr.isControl() { + sc.queuedControlFrames-- + } + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. 
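For reference, the GOAWAY/shutdown machinery above (startGracefulShutdown, goAwayTimeout, shutDownIn) is normally reached through the exported APIs rather than called directly. A minimal sketch, assuming placeholder certificate paths and an arbitrary 10-second grace period: an application configures HTTP/2 with http2.ConfigureServer and later calls Server.Shutdown, which is expected to drive the graceful path here (GOAWAY with ErrCodeNo, then waiting for open streams before the connection is closed).

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// Explicitly enable HTTP/2 with a tuned http2.Server (optional; net/http
	// also enables HTTP/2 on TLS listeners by default).
	if err := http2.ConfigureServer(srv, &http2.Server{IdleTimeout: 2 * time.Minute}); err != nil {
		log.Fatal(err)
	}

	go func() {
		// cert.pem / key.pem are placeholder paths.
		if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// ... later, e.g. on SIGTERM: ask for a graceful shutdown and give
	// in-flight streams up to 10 seconds to finish.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown: %v", err)
	}
}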
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g. CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." 
+ return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." + return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be + // acknowledged individually, even if multiple are received before the ACK. 
+ sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." + if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. 
+ sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) + } + if f.Length > 0 { + // Check whether the client has flow control quota. + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. 
+ return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. 
Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !httpguts.ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. + return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := context.WithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = req.WithContext(st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
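To illustrate the request-trailer plumbing above (the Trailer header parsing in newWriterAndRequestNoBody and copyTrailersToHandlerRequest), here is a hypothetical handler; the route, port, and certificate paths are placeholders. Keys the client declared appear in r.Trailer with nil values, and the values are filled in only after the body has been read to EOF.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func echoTrailers(w http.ResponseWriter, r *http.Request) {
	// Keys from the client's "Trailer" header are present here, but still nil.
	declared := make([]string, 0, len(r.Trailer))
	for k := range r.Trailer {
		declared = append(declared, k)
	}

	// Draining the body lets the trailing HEADERS frame be consumed, after
	// which r.Trailer holds the actual values.
	if _, err := io.Copy(ioutil.Discard, r.Body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	fmt.Fprintf(w, "declared trailers: %v\n", declared)
	for k, vv := range r.Trailer {
		fmt.Fprintf(w, "%s: %v\n", k, vv)
	}
}

func main() {
	http.HandleFunc("/upload", echoTrailers)
	// Placeholder cert paths; HTTP/2 is negotiated over TLS.
	_ = http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}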
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + didPanic := true + defer func() { + rw.rws.stream.cancelCtx() + if didPanic { + e := recover() + sc.writeFrameFromHandler(FrameWriteRequest{ + write: handlerPanicRST{rw.rws.stream.id}, + stream: rw.rws.stream, + }) + // Same as net/http: + if e != nil && e != http.ErrAbortHandler { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) + } + return + } + rw.handlerDone() + }() + handler(rw, req) + didPanic = false +} + +func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { + // 10.5.1 Limits on Header Block Size: + // .. "A server that receives a larger header block than it is + // willing to handle can send an HTTP 431 (Request Header Fields Too + // Large) status code" + const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ + w.WriteHeader(statusRequestHeaderFieldsTooLarge) + io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>
") +} + +// called from handler goroutines. +// h may be nil. +func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { + sc.serveG.checkNotOn() // NOT on + var errc chan error + if headerData.h != nil { + // If there's a header map (which we don't own), so we have to block on + // waiting for this frame to be written, so an http.Flush mid-handler + // writes out the correct value of keys, before a handler later potentially + // mutates it. + errc = errChanPool.Get().(chan error) + } + if err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: headerData, + stream: st, + done: errc, + }); err != nil { + return err + } + if errc != nil { + select { + case err := <-errc: + errChanPool.Put(errc) + return err + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + } + } + return nil +} + +// called from handler goroutines. +func (sc *serverConn) write100ContinueHeaders(st *stream) { + sc.writeFrameFromHandler(FrameWriteRequest{ + write: write100ContinueHeadersFrame{st.id}, + stream: st, + }) +} + +// A bodyReadMsg tells the server loop that the http.Handler read n +// bytes of the DATA from the client on the given stream. +type bodyReadMsg struct { + st *stream + n int +} + +// called from handler goroutines. +// Notes that the handler for the given stream ID read n bytes of its body +// and schedules flow control tokens to be sent. +func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { + sc.serveG.checkNotOn() // NOT on + if n > 0 { + select { + case sc.bodyReadCh <- bodyReadMsg{st, n}: + case <-sc.doneServing: + } + } +} + +func (sc *serverConn) noteBodyRead(st *stream, n int) { + sc.serveG.check() + sc.sendWindowUpdate(nil, n) // conn-level + if st.state != stateHalfClosedRemote && st.state != stateClosed { + // Don't send this WINDOW_UPDATE if the stream is closed + // remotely. + sc.sendWindowUpdate(st, n) + } +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate(st *stream, n int) { + sc.serveG.check() + // "The legal range for the increment to the flow control + // window is 1 to 2^31-1 (2,147,483,647) octets." + // A Go Read call on 64-bit machines could in theory read + // a larger Read than this. Very unlikely, but we handle it here + // rather than elsewhere for now. + const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. 
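A small sketch of what the flow-control bookkeeping above means for a handler reading a large request body: every Read is reported to the serve goroutine via noteBodyReadFromHandler, which hands the consumed bytes back to the peer as connection- and stream-level WINDOW_UPDATE frames through sendWindowUpdate. The package name and the 32 KiB buffer size below are arbitrary.

package handlers // any package name works; this is only a sketch

import (
	"fmt"
	"io"
	"net/http"
)

func receiveUpload(w http.ResponseWriter, r *http.Request) {
	buf := make([]byte, 32<<10) // arbitrary 32 KiB read size
	var total int64
	for {
		n, err := r.Body.Read(buf)
		total += int64(n)
		// Each successful read is credited back to the client as
		// WINDOW_UPDATE frames, so a large upload keeps flowing instead of
		// stalling once the initial stream window is exhausted.
		if err == io.EOF {
			break
		}
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	fmt.Fprintf(w, "received %d bytes\n", total)
}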
+type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? + handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } + +func (rws *responseWriterState) hasNonemptyTrailers() bool { + for _, trailer := range rws.trailers { + if _, ok := rws.handlerHeader[trailer]; ok { + return true + } + } + return false +} + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. 
+ rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + // If the Content-Encoding is non-blank, we shouldn't + // sniff the body. See Issue golang.org/issue/31753. + ce := rws.snapHeader.Get("Content-Encoding") + hasCE := len(ce) > 0 + if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + httpResCode: rws.status, + h: rws.snapHeader, + endStream: endStream, + contentType: ctype, + contentLength: clen, + date: date, + }) + if err != nil { + rws.dirty = true + return 0, err + } + if endStream { + return 0, nil + } + } + if isHeadResp { + return len(p), nil + } + if len(p) == 0 && !rws.handlerDone { + return 0, nil + } + + if rws.handlerDone { + rws.promoteUndeclaredTrailers() + } + + // only send trailers if they have actually been defined by the + // server handler. + hasNonemptyTrailers := rws.hasNonemptyTrailers() + endStream := rws.handlerDone && !hasNonemptyTrailers + if len(p) > 0 || endStream { + // only send a 0 byte DATA frame if we're ending the stream. 
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true + return 0, err + } + } + + if rws.handlerDone && hasNonemptyTrailers { + err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + streamID: rws.stream.id, + h: rws.handlerHeader, + trailers: rws.trailers, + endStream: true, + }) + if err != nil { + rws.dirty = true + } + return len(p), err + } + return len(p), nil +} + +// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// that, if present, signals that the map entry is actually for +// the response trailers, and not the response headers. The prefix +// is stripped after the ServeHTTP call finishes and the values are +// sent in the trailers. +// +// This mechanism is intended only for trailers that are not known +// prior to the headers being written. If the set of trailers is fixed +// or known before the header is written, the normal Go trailers mechanism +// is preferred: +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +const TrailerPrefix = "Trailer:" + +// promoteUndeclaredTrailers permits http.Handlers to set trailers +// after the header has already been flushed. Because the Go +// ResponseWriter interface has no way to set Trailers (only the +// Header), and because we didn't want to expand the ResponseWriter +// interface, and because nobody used trailers, and because RFC 7230 +// says you SHOULD (but not must) predeclare any trailers in the +// header, the official ResponseWriter rules said trailers in Go must +// be predeclared, and then we reuse the same ResponseWriter.Header() +// map to mean both Headers and Trailers. When it's time to write the +// Trailers, we pick out the fields of Headers that were declared as +// trailers. That worked for a while, until we found the first major +// user of Trailers in the wild: gRPC (using them only over http2), +// and gRPC libraries permit setting trailers mid-stream without +// predeclaring them. So: change of plans. We still permit the old +// way, but we also permit this hack: if a Header() key begins with +// "Trailer:", the suffix of that key is a Trailer. Because ':' is an +// invalid token byte anyway, there is no ambiguity. (And it's already +// filtered out) It's mildly hacky, but not terrible. +// +// This method runs after the Handler is done and promotes any Header +// fields to be trailers. +func (rws *responseWriterState) promoteUndeclaredTrailers() { + for k, vv := range rws.handlerHeader { + if !strings.HasPrefix(k, TrailerPrefix) { + continue + } + trailerKey := strings.TrimPrefix(k, TrailerPrefix) + rws.declareTrailer(trailerKey) + rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv + } + + if len(rws.trailers) > 1 { + sorter := sorterPool.Get().(*sorter) + sorter.SortStrings(rws.trailers) + sorterPool.Put(sorter) + } +} + +func (w *responseWriter) Flush() { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.bw.Buffered() > 0 { + if err := rws.bw.Flush(); err != nil { + // Ignore the error. The frame writer already knows. + return + } + } else { + // The bufio.Writer won't call chunkWriter.Write + // (writeChunk with zero bytes, so we have to do it + // ourselves to force the HTTP response header and/or + // final DATA frame (with END_STREAM) to be sent. 
+ rws.writeChunk(nil) + } +} + +func (w *responseWriter) CloseNotify() <-chan bool { + rws := w.rws + if rws == nil { + panic("CloseNotify called after Handler finished") + } + rws.closeNotifierMu.Lock() + ch := rws.closeNotifierCh + if ch == nil { + ch = make(chan bool, 1) + rws.closeNotifierCh = ch + cw := rws.stream.cw + go func() { + cw.Wait() // wait for close + ch <- true + }() + } + rws.closeNotifierMu.Unlock() + return ch +} + +func (w *responseWriter) Header() http.Header { + rws := w.rws + if rws == nil { + panic("Header called after Handler finished") + } + if rws.handlerHeader == nil { + rws.handlerHeader = make(http.Header) + } + return rws.handlerHeader +} + +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. 
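Tying the write path above together from the handler's point of view: writes are buffered by rws.bw and cut into DATA frames in writeChunk, http.Flusher forces a frame out early, a predeclared trailer is set by name before returning, and an undeclared one is added through the "Trailer:" prefix (exported here as TrailerPrefix and by net/http as http.TrailerPrefix). A sketch with placeholder header names and timings:

package handlers // any package name works; this is only a sketch

import (
	"fmt"
	"net/http"
	"time"
)

func streamHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Trailer", "X-Stream-Status") // predeclared trailer
	w.Header().Set("Content-Type", "text/plain")
	w.WriteHeader(http.StatusOK)

	flusher, _ := w.(http.Flusher) // the http2 responseWriter implements http.Flusher
	for i := 0; i < 5; i++ {
		fmt.Fprintf(w, "chunk %d\n", i) // buffered, then cut into DATA frames by writeChunk
		if flusher != nil {
			flusher.Flush() // forces the buffered bytes (and headers) onto the wire now
		}
		time.Sleep(200 * time.Millisecond)
	}

	// Predeclared trailer: just set it by name before returning.
	w.Header().Set("X-Stream-Status", "ok")
	// Undeclared trailer: the "Trailer:" prefix is stripped and the field is
	// promoted to a trailer by promoteUndeclaredTrailers.
	w.Header().Set(http.TrailerPrefix+"X-Example-Checksum", "example-value")
}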
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +var _ http.Pusher = (*responseWriter)(nil) + +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + if opts == nil { + opts = new(http.PushOptions) + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. + u, err := url.Parse(target) + if err != nil { + return err + } + if u.Scheme == "" { + if !strings.HasPrefix(target, "/") { + return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target) + } + u.Scheme = wantScheme + u.Host = w.rws.req.Host + } else { + if u.Scheme != wantScheme { + return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme) + } + if u.Host == "" { + return errors.New("URL must have a host") + } + } + for k := range opts.Header { + if strings.HasPrefix(k, ":") { + return fmt.Errorf("promised request headers cannot include pseudo header %q", k) + } + // These headers are meaningful only if the request has a body, + // but PUSH_PROMISE requests cannot have a body. + // http://tools.ietf.org/html/rfc7540#section-8.2 + // Also disallow Host, since the promised URL must be absolute. 
+ switch strings.ToLower(k) { + case "content-length", "content-encoding", "trailer", "te", "expect", "host": + return fmt.Errorf("promised request headers cannot include %q", k) + } + } + if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil { + return err + } + + // The RFC effectively limits promised requests to GET and HEAD: + // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]" + // http://tools.ietf.org/html/rfc7540#section-8.2 + if opts.Method != "GET" && opts.Method != "HEAD" { + return fmt.Errorf("method %q must be GET or HEAD", opts.Method) + } + + msg := &startPushRequest{ + parent: st, + method: opts.Method, + url: u, + header: cloneHeader(opts.Header), + done: errChanPool.Get().(chan error), + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case sc.serveMsgCh <- msg: + } + + select { + case <-sc.doneServing: + return errClientDisconnected + case <-st.cw: + return errStreamClosed + case err := <-msg.done: + errChanPool.Put(msg.done) + return err + } +} + +type startPushRequest struct { + parent *stream + method string + url *url.URL + header http.Header + done chan error +} + +func (sc *serverConn) startPush(msg *startPushRequest) { + sc.serveG.check() + + // http://tools.ietf.org/html/rfc7540#section-6.6. + // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that + // is in either the "open" or "half-closed (remote)" state. + if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote { + // responseWriter.Push checks that the stream is peer-initiated. + msg.done <- errStreamClosed + return + } + + // http://tools.ietf.org/html/rfc7540#section-6.6. + if !sc.pushEnabled { + msg.done <- http.ErrNotSupported + return + } + + // PUSH_PROMISE frames must be sent in increasing order by stream ID, so + // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE + // is written. Once the ID is allocated, we start the request handler. + allocatePromisedID := func() (uint32, error) { + sc.serveG.check() + + // Check this again, just in case. Technically, we might have received + // an updated SETTINGS by the time we got around to writing this frame. + if !sc.pushEnabled { + return 0, http.ErrNotSupported + } + // http://tools.ietf.org/html/rfc7540#section-6.5.2. + if sc.curPushedStreams+1 > sc.clientMaxStreams { + return 0, ErrPushLimitReached + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.1. + // Streams initiated by the server MUST use even-numbered identifiers. + // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. 
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 000000000..c51a73c06 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2634 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code. + +package http2 + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math" + mathrand "math/rand" + "net" + "net/http" + "net/http/httptrace" + "net/textproto" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" +) + +const ( + // transportDefaultConnFlow is how many connection-level flow control + // tokens we give the server at start-up, past the default 64k. 
+ transportDefaultConnFlow = 1 << 30 + + // transportDefaultStreamFlow is how many stream-level flow + // control tokens we announce to the peer, and how many bytes + // we buffer per stream. + transportDefaultStreamFlow = 4 << 20 + + // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send + // a stream-level WINDOW_UPDATE for at a time. + transportDefaultStreamMinRefresh = 4 << 10 + + defaultUserAgent = "Go-http-client/2.0" +) + +// Transport is an HTTP/2 Transport. +// +// A Transport internally caches connections to servers. It is safe +// for concurrent use by multiple goroutines. +type Transport struct { + // DialTLS specifies an optional dial function for creating + // TLS connections for requests. + // + // If DialTLS is nil, tls.Dial is used. + // + // If the returned net.Conn has a ConnectionState method like tls.Conn, + // it will be used to set http.Response.TLS. + DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // ConnPool optionally specifies an alternate connection pool to use. + // If nil, the default is used. + ConnPool ClientConnPool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // AllowHTTP, if true, permits HTTP/2 requests using the insecure, + // plain-text "http" scheme. Note that this does not enable h2c support. + AllowHTTP bool + + // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to + // send in the initial settings frame. It is how many bytes + // of response headers are allowed. Unlike the http2 spec, zero here + // means to use a default limit (currently 10MB). If you actually + // want to advertise an ulimited value to the peer, Transport + // interprets the highest possible value here (0xffffffff or 1<<32-1) + // to mean no limit. + MaxHeaderListSize uint32 + + // StrictMaxConcurrentStreams controls whether the server's + // SETTINGS_MAX_CONCURRENT_STREAMS should be respected + // globally. If false, new TCP connections are created to the + // server as needed to keep each under the per-connection + // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the + // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as + // a global limit and callers of RoundTrip block when needed, + // waiting for their turn. + StrictMaxConcurrentStreams bool + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. 
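+//
+// Illustrative sketch (not from the upstream sources; t1 and client are
+// hypothetical names, and the package is assumed imported as http2):
+//
+//    t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
+//    if err := http2.ConfigureTransport(t1); err != nil {
+//        log.Fatal(err)
+//    }
+//    client := &http.Client{Transport: t1}
+//
+// after which client negotiates "h2" via ALPN where the server supports it.
+//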
+// It returns an error if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) + return err +} + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. 
+type ClientConn struct { + t *Transport + tconn net.Conn // usually *tls.Conn, except specialized impls + tlsState *tls.ConnectionState // nil only for specialized impls + reused uint32 // whether conn is being reused; atomic + singleUse bool // whether being used for a single http.Request + + // readLoop goroutine fields: + readerDone chan struct{} // closed on error + readerErr error // set before readerDone is closed + + idleTimeout time.Duration // or 0 for never + idleTimer *time.Timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closing bool + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte + + wmu sync.Mutex // held while writing; acquire AFTER mu if holding both + werr error // first write error that has occurred +} + +// clientStream is the state for a single HTTP/2 stream. One of these +// is created for each Transport.RoundTrip call. +type clientStream struct { + cc *ClientConn + req *http.Request + trace *httptrace.ClientTrace // or nil + ID uint32 + resc chan resAndError + bufPipe pipe // buffered pipe with the flow-controlled response payload + startedWrite bool // started request body write; guarded by cc.mu + requestedGzip bool + on100 func() // optional code to run if get a 100 continue response + + flow flow // guarded by cc.mu + inflow flow // guarded by cc.mu + bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read + readErr error // sticky read error; owned by transportResponseBody.Read + stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu + didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu + + peerReset chan struct{} // closed on peer reset + resetErr error // populated before peerReset is closed + + done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu + + // owned by clientConnReadLoop: + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen + + trailer http.Header // accumulated trailers + resTrailer *http.Header // client's Response.Trailer +} + +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. 
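+//
+// Note: req.Cancel is the legacy per-request cancellation channel; newer
+// callers cancel through the request's context instead, and the select below
+// honors both signals as well as normal completion via done.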
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := req.Context() + if req.Cancel == nil && ctx.Done() == nil { + return nil + } + select { + case <-req.Cancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { + cs.cancelStream() + cs.bufPipe.CloseWithError(err) + } +} + +func (cs *clientStream) cancelStream() { + cc := cs.cc + cc.mu.Lock() + didReset := cs.didReset + cs.didReset = true + cc.mu.Unlock() + + if !didReset { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) + } +} + +// checkResetOrDone reports any error sent in a RST_STREAM frame by the +// server, or errStreamClosed if the stream is complete. +func (cs *clientStream) checkResetOrDone() error { + select { + case <-cs.peerReset: + return cs.resetErr + case <-cs.done: + return errStreamClosed + default: + return nil + } +} + +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + +func (cs *clientStream) abortRequestBodyWrite(err error) { + if err == nil { + panic("nil error") + } + cc := cs.cc + cc.mu.Lock() + cs.stopReqBody = err + cc.cond.Broadcast() + cc.mu.Unlock() +} + +type stickyErrWriter struct { + w io.Writer + err *error +} + +func (sew stickyErrWriter) Write(p []byte) (n int, err error) { + if *sew.err != nil { + return 0, *sew.err + } + n, err = sew.w.Write(p) + *sew.err = err + return +} + +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether RoundTripOpt may + // create a new TCP connection. If set true and + // no cached connection is available, RoundTripOpt + // will return ErrNoCachedConn. 
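+ //
+ // Illustrative sketch (not from the upstream sources; t2 and req are
+ // hypothetical names for an *http2.Transport and an *http.Request):
+ //
+ //    res, err := t2.RoundTripOpt(req, http2.RoundTripOpt{OnlyCachedConn: true})
+ //    if err == http2.ErrNoCachedConn {
+ //        // fall back to another transport, or dial explicitly
+ //    }
+ //    _ = res // use the response as usual when err is nil
+ //
+ // ErrNoCachedConn is declared earlier in this file.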
+ OnlyCachedConn bool +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.RoundTripOpt(req, RoundTripOpt{}) +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// RoundTripOpt is like RoundTrip, but takes options. +func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + return nil, errors.New("http2: unsupported scheme") + } + + addr := authorityAddr(req.URL.Scheme, req.URL.Host) + for retry := 0; ; retry++ { + cc, err := t.connPool().GetClientConn(req, addr) + if err != nil { + t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) + return nil, err + } + reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + traceGotConn(req, cc, reused) + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-req.Context().Done(): + return nil, req.Context().Err() + } + } + } + if err != nil { + t.vlogf("RoundTrip failure: %v", err) + return nil, err + } + return res, nil + } +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle. +// It does not interrupt any connections currently in use. +func (t *Transport) CloseIdleConnections() { + if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { + cp.closeIdleConnections() + } +} + +var ( + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") +) + +// shouldRetryRequest is called by RoundTrip when a request fails to get +// response headers. It is always called with a non-nil error. +// It returns either a request to retry (either the same request, or a +// modified clone), or an error if the request can't be replayed. +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { + return nil, err + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || req.Body == http.NoBody { + return req, nil + } + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. + if req.GetBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? 
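+ // Illustrative sketch (not from the upstream sources; payload and endpoint
+ // are hypothetical): a caller makes a body replayable by providing GetBody,
+ // which http.NewRequest already does for *bytes.Buffer, *bytes.Reader and
+ // *strings.Reader bodies, e.g.
+ //
+ //    payload := []byte(`{"k":"v"}`)
+ //    req, _ := http.NewRequest("POST", endpoint, bytes.NewReader(payload))
+ //
+ // after which req.GetBody returns a fresh reader on each call and this
+ // retry path can rebuild the request.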
+ body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig.Clone() + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return t.t1.ExpectContinueTimeout +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if t.AllowHTTP { + cc.nextStreamID = 3 + } + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { + if cc.singleUse && cc.nextStreamID > 1 { + return + } + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. 
+ maxConcurrentOkay = true + } else { + maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) + } + + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest +} + +// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// only be called when we're idle, but because we're coming from a new +// goroutine, there could be a new request coming in at the same time, +// so this simply calls the synchronized closeIfIdle to shut down this +// connection. The timer could just call closeIfIdle, but this is more +// clear. +func (cc *ClientConn) onIdleTimeout() { + cc.closeIfIdle() +} + +func (cc *ClientConn) closeIfIdle() { + cc.mu.Lock() + if len(cc.streams) > 0 { + cc.mu.Unlock() + return + } + cc.closed = true + nextID := cc.nextStreamID + // TODO: do clients send GOAWAY too? maybe? Just Close: + cc.mu.Unlock() + + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } + cc.tconn.Close() +} + +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + +const maxAllocFrameSize = 512 << 10 + +// frameBuffer returns a scratch buffer suitable for writing DATA frames. +// They're capped at the min of the peer's max frame size or 512KB +// (kinda arbitrarily), but definitely capped so we don't allocate 4GB +// bufers. 
+func (cc *ClientConn) frameScratchBuffer() []byte { + cc.mu.Lock() + size := cc.maxFrameSize + if size > maxAllocFrameSize { + size = maxAllocFrameSize + } + for i, buf := range cc.freeBuf { + if len(buf) >= int(size) { + cc.freeBuf[i] = nil + cc.mu.Unlock() + return buf[:size] + } + } + cc.mu.Unlock() + return make([]byte, size) +} + +func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { + cc.mu.Lock() + defer cc.mu.Unlock() + const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. + if len(cc.freeBuf) < maxBufs { + cc.freeBuf = append(cc.freeBuf, buf) + return + } + for i, old := range cc.freeBuf { + if old == nil { + cc.freeBuf[i] = buf + return + } + } + // forget about it. +} + +// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not +// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. +var errRequestCanceled = errors.New("net/http: request canceled") + +func commaSeparatedTrailers(req *http.Request) (string, error) { + keys := make([]string, 0, len(req.Trailer)) + for k := range req.Trailer { + k = http.CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +func (cc *ClientConn) responseHeaderTimeout() time.Duration { + if cc.t.t1 != nil { + return cc.t.t1.ResponseHeaderTimeout + } + // No way to do this (yet?) with just an http2.Transport. Probably + // no need. Request.Cancel this is the new way. We only need to support + // this for compatibility with the old http.Transport fields when + // we're doing transparent http2. + return 0 +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. +// Certain headers are special-cased as okay but not transmitted later. +func checkConnHeaders(req *http.Request) error { + if v := req.Header.Get("Upgrade"); v != "" { + return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) + } + if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) + } + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("http2: invalid Connection request header: %q", vv) + } + return nil +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. 
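+//
+// For example: a request with Body == nil (or http.NoBody) yields 0; a request
+// with a Body and ContentLength == 25 yields 25; a request with a Body but
+// ContentLength == 0 yields -1 ("unknown").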
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = httptrace.ContextClientTrace(req.Context()) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := req.Context() + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). 
If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
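+ // The two return paths above close waitingForConn, so the goroutine started
+ // below does not leak; on the cancellation path it has already returned
+ // before waitingForConnErr is observed.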
+ if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + +// requires cc.wmu be held +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { + first := true // first frame written (HEADERS is first, then CONTINUATION) + for len(hdrs) > 0 && cc.werr == nil { + chunk := hdrs + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] + } + hdrs = hdrs[len(chunk):] + endHeaders := len(hdrs) == 0 + if first { + cc.fr.WriteHeaders(HeadersFrameParam{ + StreamID: streamID, + BlockFragment: chunk, + EndStream: endStream, + EndHeaders: endHeaders, + }) + first = false + } else { + cc.fr.WriteContinuation(streamID, endHeaders, chunk) + } + } + // TODO(bradfitz): this Flush could potentially block (as + // could the WriteHeaders call(s) above), which means they + // wouldn't respond to Request.Cancel being readable. That's + // rare, but this should probably be in a goroutine. + cc.bw.Flush() + return cc.werr +} + +// internal error values; they don't escape to callers +var ( + // abort request body write; don't send cancel + errStopReqBodyWrite = errors.New("http2: aborting request body write") + + // abort request body write, but send stream reset of cancel. + errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") + + errReqBodyTooLong = errors.New("http2: request body larger than specified content length") +) + +func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { + cc := cs.cc + sentEnd := false // whether we sent the final DATA frame w/ END_STREAM + buf := cc.frameScratchBuffer() + defer cc.putFrameScratchBuffer(buf) + + defer func() { + traceWroteRequest(cs.trace, err) + // TODO: write h12Compare test showing whether + // Request.Body is closed by the Transport, + // and in multiple cases: server replies <=299 and >299 + // while still writing request body + cerr := bodyCloser.Close() + if err == nil { + err = cerr + } + }() + + req := cs.req + hasTrailers := req.Trailer != nil + remainLen := actualContentLength(req) + hasContentLen := remainLen != -1 + + var sawEOF bool + for !sawEOF { + n, err := body.Read(buf[:len(buf)-1]) + if hasContentLen { + remainLen -= int64(n) + if remainLen == 0 && err == nil { + // The request body's Content-Length was predeclared and + // we just finished reading it all, but the underlying io.Reader + // returned the final chunk with a nil error (which is one of + // the two valid things a Reader can do at EOF). Because we'd prefer + // to send the END_STREAM bit early, double-check that we're actually + // at EOF. Subsequent reads should return (0, EOF) at this point. + // If either value is different, we return an error in one of two ways below. 
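+ // (io.Reader permits either (n, io.EOF) on the final read or (n, nil)
+ // followed by (0, io.EOF); the extra Read below distinguishes a finished
+ // body from one that keeps producing data past the declared Content-Length.)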
+ var n1 int + n1, err = body.Read(buf[n:]) + remainLen -= int64(n1) + } + if remainLen < 0 { + err = errReqBodyTooLong + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + return err + } + } + if err == io.EOF { + sawEOF = true + err = nil + } else if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + return err + } + + remain := buf[:n] + for len(remain) > 0 && err == nil { + var allowed int32 + allowed, err = cs.awaitFlowControl(len(remain)) + switch { + case err == errStopReqBodyWrite: + return err + case err == errStopReqBodyWriteAndCancel: + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + return err + case err != nil: + return err + } + cc.wmu.Lock() + data := remain[:allowed] + remain = remain[allowed:] + sentEnd = sawEOF && len(remain) == 0 && !hasTrailers + err = cc.fr.WriteData(cs.ID, sentEnd, data) + if err == nil { + // TODO(bradfitz): this flush is for latency, not bandwidth. + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagel-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. + err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. 
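+//
+// Illustrative sketch of the output (not from the upstream sources; the URL is
+// hypothetical): for a plain GET of https://example.com/a?b=c with no
+// caller-set headers, and with the transport requesting gzip as it does by
+// default, the fields emitted below are, in order:
+//
+//    :authority      example.com
+//    :method         GET
+//    :path           /a?b=c
+//    :scheme         https
+//    accept-encoding gzip
+//    user-agent      Go-http-client/2.0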
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. 
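+ // Note: hpack.HeaderField.Size is len(Name)+len(Value)+32, the entry-size
+ // rule of RFC 7541 Section 4.1, which is also how the peer accounts for
+ // SETTINGS_MAX_HEADER_LIST_SIZE per RFC 7540 Section 6.5.2.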
+ hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. 
+func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. + err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + 
} +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 1xx responses). 
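+//
+// For 1xx responses, handleResponse resets cs.pastHeaders so that the next
+// HEADERS block is processed as the real response, and it caps the number of
+// informational responses at 5, matching net/http.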
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + res.Uncompressed = true + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. 
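+ // RFC 7540 Section 8.1 defines trailers as a final HEADERS block carrying
+ // END_STREAM, and Section 8.1.2.1 forbids pseudo-header fields in trailers;
+ // those are the conditions checked in this function.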
+ return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. + v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? 
Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. 
particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. + cc.vlogf("Unhandled Setting: %v", s) + } + return nil + }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr +} + +func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if f.StreamID != 0 && cs == nil { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + fl := &cc.flow + if cs != nil { + fl = &cs.flow + } + if !fl.add(int32(f.Increment)) { + return ConnectionError(ErrCodeFlowControl) + } + cc.cond.Broadcast() + return nil +} + +func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { + cs := rl.cc.streamByID(f.StreamID, true) + if cs == nil { + // TODO: return error if server tries to RST_STEAM an idle stream + return nil + } + select { + case <-cs.peerReset: + // Already reset. + // This is the only goroutine + // which closes this, so there + // isn't a race. + default: + err := streamError(cs.ID, f.ErrCode) + cs.resetErr = err + close(cs.peerReset) + cs.bufPipe.CloseWithError(err) + cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + } + return nil +} + +// Ping sends a PING frame to the server and waits for the ack. 
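Ping below is part of the exported ClientConn API, so a caller that holds a connection (for example one returned by (*Transport).NewClientConn) can use it as a lightweight health check. A minimal usage sketch, not part of the vendored upstream source; the helper name and the way cc is obtained are assumptions:

package example

import (
	"context"
	"time"

	"golang.org/x/net/http2"
)

// pingConn reports whether an established HTTP/2 client connection still
// answers a PING within the given timeout. How cc is obtained (for example
// via (*http2.Transport).NewClientConn) is up to the caller.
func pingConn(cc *http2.ClientConn, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel() // release the context's timer
	return cc.Ping(ctx)
}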
+func (cc *ClientConn) Ping(ctx context.Context) error { + c := make(chan struct{}) + // Generate a random payload + var p [8]byte + for { + if _, err := rand.Read(p[:]); err != nil { + return err + } + cc.mu.Lock() + // check for dup before insert + if _, found := cc.pings[p]; !found { + cc.pings[p] = c + cc.mu.Unlock() + break + } + cc.mu.Unlock() + } + cc.wmu.Lock() + if err := cc.fr.WritePing(false, p); err != nil { + cc.wmu.Unlock() + return err + } + if err := cc.bw.Flush(); err != nil { + cc.wmu.Unlock() + return err + } + cc.wmu.Unlock() + select { + case <-c: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-cc.readerDone: + // connection closed + return cc.readerErr + } +} + +func (rl *clientConnReadLoop) processPing(f *PingFrame) error { + if f.IsAck() { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + // If ack, notify listener if any + if c, ok := cc.pings[f.Data]; ok { + close(c) + delete(cc.pings, f.Data) + } + return nil + } + cc := rl.cc + cc.wmu.Lock() + defer cc.wmu.Unlock() + if err := cc.fr.WritePing(true, f.Data); err != nil { + return err + } + return cc.bw.Flush() +} + +func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { + // We told the peer we don't want them. + // Spec says: + // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH + // setting of the peer endpoint is set to 0. An endpoint that + // has set this setting and has received acknowledgement MUST + // treat the receipt of a PUSH_PROMISE frame as a connection + // error (Section 5.4.1) of type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) +} + +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). + cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) 
+} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httpguts.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. + const hugeDuration = 365 * 24 * time.Hour + s.timer = time.AfterFunc(hugeDuration, func() { + s.fnonce.Do(s.fn) + }) + return +} + +func (s bodyWriterState) cancel() { + if s.timer != nil { + s.timer.Stop() + } +} + +func (s bodyWriterState) on100() { + if s.timer == nil { + // If we didn't do a delayed write, ignore the server's + // bogus 100 continue response. + return + } + s.timer.Stop() + go func() { s.fnonce.Do(s.fn) }() +} + +// scheduleBodyWrite starts writing the body, either immediately (in +// the common case) or after the delay timeout. It should not be +// called until after the headers have been written. +func (s bodyWriterState) scheduleBodyWrite() { + if s.timer == nil { + // We're not doing a delayed write (see + // getBodyWriterState), so just start the writing + // goroutine immediately. + go s.fn() + return + } + traceWait100Continue(s.cs.trace) + if s.timer.Stop() { + s.timer.Reset(s.delay) + } +} + +// isConnectionCloseRequest reports whether req should use its own +// connection for a single request and then close the connection. 
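The bodyWriterState machinery above only takes the delayed-write path when the request carries an Expect: 100-continue header and the underlying http.Transport has a non-zero ExpectContinueTimeout. A hedged client-side sketch of exercising that path; ConfigureTransport comes from elsewhere in this vendored package, and the helper name, URL, and one-second timeout are assumptions:

package example

import (
	"bytes"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// uploadWithContinue (hypothetical helper) sends a PUT whose body write is
// deferred until the server answers 100 Continue or the timeout fires,
// which is the path the delayed bodyWriterState above handles.
func uploadWithContinue(url string, payload []byte) (*http.Response, error) {
	t1 := &http.Transport{ExpectContinueTimeout: 1 * time.Second}
	if err := http2.ConfigureTransport(t1); err != nil { // enable HTTP/2 on t1
		return nil, err
	}
	req, err := http.NewRequest("PUT", url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Expect", "100-continue")
	return (&http.Client{Transport: t1}).Do(req)
}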
+func isConnectionCloseRequest(req *http.Request) bool { + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converting panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. +// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.Transport.RoundTrip(req) + if isNoCachedConnError(err) { + return nil, http.ErrSkipAltProtocol + } + return res, err +} + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + ci.Reused = reused + cc.mu.Lock() + ci.WasIdle = len(cc.streams) == 0 && reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 000000000..3849bc263 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. 
All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) +} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. 
+type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. + endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // upper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. 
+ return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. + allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only if k is in keys. 
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 000000000..f24d2b1e7 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,248 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. + // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. No frames should be discarded except by CloseStream. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. 
+ stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// isControl reports whether wr is a control frame for MaxQueuedControlFrames +// purposes. That includes non-stream frames and RST_STREAM frames. +func (wr FrameWriteRequest) isControl() bool { + return wr.stream == nil +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. +func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. 
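The WriteScheduler interface above is the pluggable policy that decides the order of queued FrameWriteRequests. A hedged sketch of installing the RFC 7540 priority scheduler added later in this diff through the package's Server.NewWriteScheduler hook; that hook and ConfigureServer live elsewhere in the vendored package, and the helper name, listen address, and tuning values are assumptions:

package example

import (
	"net/http"

	"golang.org/x/net/http2"
)

// newPriorityServer (hypothetical helper) wires an http.Server to HTTP/2 and
// explicitly selects the priority write scheduler, tuned with the
// configuration knobs documented in PriorityWriteSchedulerConfig.
func newPriorityServer(handler http.Handler) (*http.Server, error) {
	srv := &http.Server{Addr: ":8443", Handler: handler}
	h2 := &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,   // keep a few closed streams for prioritization
				MaxIdleNodesInTree:       10,   // and a few idle "grouping" streams
				ThrottleOutOfOrderWrites: true, // limit bandwidth stolen from higher-priority parents
			})
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		return nil, err
	}
	return srv, nil
}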
+func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. +func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 000000000..2618b2c11 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. 
To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. +func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { + if cfg == nil { + // For justification of these defaults, see: + // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY + cfg = &PriorityWriteSchedulerConfig{ + MaxClosedNodesInTree: 10, + MaxIdleNodesInTree: 10, + ThrottleOutOfOrderWrites: false, + } + } + + ws := &priorityWriteScheduler{ + nodes: make(map[uint32]*priorityNode), + maxClosedNodesInTree: cfg.MaxClosedNodesInTree, + maxIdleNodesInTree: cfg.MaxIdleNodesInTree, + enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, + } + ws.nodes[0] = &ws.root + if cfg.ThrottleOutOfOrderWrites { + ws.writeThrottleLimit = 1024 + } else { + ws.writeThrottleLimit = math.MaxInt32 + } + return ws +} + +type priorityNodeState int + +const ( + priorityNodeOpen priorityNodeState = iota + priorityNodeClosed + priorityNodeIdle +) + +// priorityNode is a node in an HTTP/2 priority tree. +// Each node is associated with a single stream ID. +// See RFC 7540, Section 5.3. +type priorityNode struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeState // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree + + // These links form the priority tree. + parent *priorityNode + kids *priorityNode // start of the kids list + prev, next *priorityNode // doubly-linked list of siblings +} + +func (n *priorityNode) setParent(parent *priorityNode) { + if n == parent { + panic("setParent to self") + } + if n.parent == parent { + return + } + // Unlink from current parent. 
+ if parent := n.parent; parent != nil { + if n.prev == nil { + parent.kids = n.next + } else { + n.prev.next = n.next + } + if n.next != nil { + n.next.prev = n.prev + } + } + // Link to new parent. + // If parent=nil, remove n from the tree. + // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder). + n.parent = parent + if parent == nil { + n.next = nil + n.prev = nil + } else { + n.next = parent.kids + n.prev = nil + if n.next != nil { + n.next.prev = n + } + parent.kids = n + } +} + +func (n *priorityNode) addBytes(b int64) { + n.bytes += b + for ; n != nil; n = n.parent { + n.subtreeBytes += b + } +} + +// walkReadyInOrder iterates over the tree in priority order, calling f for each node +// with a non-empty write queue. When f returns true, this function returns true and the +// walk halts. tmp is used as scratch space for sorting. +// +// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true +// if any ancestor p of n is still open (ignoring the root node). +func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { + if !n.q.empty() && f(n, openParent) { + return true + } + if n.kids == nil { + return false + } + + // Don't consider the root "open" when updating openParent since + // we can't send data frames on the root stream (only control frames). + if n.id != 0 { + openParent = openParent || (n.state == priorityNodeOpen) + } + + // Common case: only one kid or all kids have the same weight. + // Some clients don't use weights; other clients (like web browsers) + // use mostly-linear priority trees. + w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. 
When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." + parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." 
+ // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 000000000..9a7b9e581 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. 
+ // When a stream is idle, closed, or emptied, it's deleted + // from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. + for streamID, q := range ws.sq { + if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } + return wr, true + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go new file mode 100644 index 000000000..a98a31f40 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -0,0 +1,734 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.10 + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/bidi" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. 
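The package-level ToASCII and ToUnicode wrappers above are enough for simple conversions. A small runnable sketch of round-tripping a name; the commented results are the expected conversions, not captured output:

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// ASCII-compatible encoding of an internationalized name.
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // expected: xn--bcher-kva.example.com <nil>

	// And back to the Unicode form.
	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // expected: bücher.example.com <nil>
}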
+func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. 
+// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. 
+ Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. 
+ n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. + if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. 
+type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. 
+func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. + if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go new file mode 100644 index 000000000..8842146b5 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -0,0 +1,682 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.10 + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. 
+func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. +func Transitional(transitional bool) Option { + return func(o *options) { o.transitional = true } +} + +// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts +// are longer than allowed by the RFC. +func VerifyDNSLength(verify bool) Option { + return func(o *options) { o.verifyDNSLength = verify } +} + +// RemoveLeadingDots removes leading label separators. Leading runes that map to +// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well. +// +// This is the behavior suggested by the UTS #46 and is adopted by some +// browsers. +func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissable ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. 
+// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + RemoveLeadingDots(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (string, error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of a IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. +// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. 
+ Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' 
{ + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) + } + i += sz + } + return s, nil +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. 
+const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000..02c7d59af --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go new file mode 100644 index 000000000..54fddb4b1 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -0,0 +1,4559 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.10,!go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
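+// An illustrative reading of the size comment above, assuming the 64-entry
+// dense-block layout implied by lookupValue: 127 blocks * 64 entries = 8128
+// uint16 values = 16256 bytes. For a block number n < 125 the value for byte
+// b is read straight from idnaValues[n<<6+uint32(b)]; block numbers of 125
+// and above are redirected to idnaSparse.lookup(n-125, b).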
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42114 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go new file mode 100644 index 000000000..c515d7ad2 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -0,0 +1,4653 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "11.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 
0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, + 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, + 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 
0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168, + 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 
0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179, + 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d, + 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184, + 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, 
+ // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 
0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 276 entries, 552 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 
0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca} + +// idnaSparseValues: 1997 entries, 7988 bytes +var idnaSparseValues = [1997]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x06}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x6, offset 0x33 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 
0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3e + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xae}, + {value: 0x0808, lo: 0xaf, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4a + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4e + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5d + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x62 + {value: 0x0000, lo: 0x09}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbf}, + // Block 0xc, offset 0x6c + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x78 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x86 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8b + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 
0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, offset 0x94 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa4 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb2 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbe + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xca + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xdb + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, 
+ {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x17, offset 0xec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x10a + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x111 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11c + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12b + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 
0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0x143 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x145 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x14a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14d + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x150 + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x152 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x169 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x171 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x177 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x182 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x187 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18e + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, 
+ // Block 0x2d, offset 0x194 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x199 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a5 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1af + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b5 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c6 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1d0 + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x34, offset 0x1d3 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1db + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1de + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, 
+ {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1eb + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f7 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fe + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x206 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x216 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x222 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x224 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22e + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23a + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, 
+ {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x246 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x252 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x25a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25f + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x269 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x27a + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27e + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x289 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28d + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, 
offset 0x296 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29e + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a4 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ac + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2b0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b6 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2ba + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2be + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xbf}, + // Block 0x53, offset 0x2c2 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x54, offset 0x2c7 + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2cd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2dc + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2e7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 
0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f5 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0xbf}, + // Block 0x5b, offset 0x2f8 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x2fe + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x302 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x304 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x307 + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x309 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30c + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x316 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x319 + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x328 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32c + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x331 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x334 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x338 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, 
hi: 0xbf}, + // Block 0x69, offset 0x33d + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x342 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x348 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x34e + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x367 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x70, offset 0x376 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x383 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x38d + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + 
{value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a0 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b1 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3ba + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3ca + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3d7 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e1 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e6 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + 
{value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3fc + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x3fe + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x402 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x404 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x408 + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x411 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x417 + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41b + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42b + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x435 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x89, 
offset 0x43d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x443 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x44f + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x453 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x459 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x45e + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x467 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46c + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x472 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x479 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x480 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x487 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 0x490 + {value: 0x0000, lo: 
0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x493 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x498 + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a4 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4aa + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4af + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b6 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4be + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c3 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4c7 + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4d7 + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4de + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e2 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 
0xbf}, + // Block 0xa4, offset 0x4e6 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4ed + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4ef + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4f9 + {value: 0x0000, lo: 0x08}, + {value: 0x0908, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0xa1}, + {value: 0x0c08, lo: 0xa2, hi: 0xa2}, + {value: 0x0a08, lo: 0xa3, hi: 0xa3}, + {value: 0x3308, lo: 0xa4, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xaa, offset 0x502 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0xa6}, + {value: 0x0808, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0a08, lo: 0xb0, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb3}, + {value: 0x0a08, lo: 0xb4, hi: 0xbf}, + // Block 0xac, offset 0x50e + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x84}, + {value: 0x0808, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x90}, + {value: 0x0a18, lo: 0x91, hi: 0x93}, + {value: 0x0c18, lo: 0x94, hi: 0x94}, + {value: 0x0818, lo: 0x95, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xad, offset 0x516 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xae, offset 0x51c + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xaf, offset 0x525 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xb0, offset 0x531 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb1, offset 0x538 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, 
hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb2, offset 0x541 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb3, offset 0x54b + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb4, offset 0x552 + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb5, offset 0x560 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb6, offset 0x56d + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb7, offset 0x57a + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb8, offset 0x583 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb9, offset 0x587 + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, 
hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xba, offset 0x596 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xbb, offset 0x59e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbc, offset 0x5a9 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbd, offset 0x5b2 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbe, offset 0x5b8 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbf, offset 0x5c0 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc1, offset 0x5d3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc2, offset 0x5d6 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc3, offset 0x5e2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, 
hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xc4, offset 0x5eb + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc5, offset 0x5ee + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f3 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x5fe + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc8, offset 0x607 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc9, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xca, offset 0x616 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xcb, offset 0x620 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xcc, offset 0x629 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xcd, offset 0x635 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, 
hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xce, offset 0x642 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcf, offset 0x64f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd0, offset 0x65d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd1, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xd2, offset 0x667 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xd3, offset 0x66c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xd4, offset 0x66f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xd5, offset 0x672 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd6, offset 0x675 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd7, offset 0x67c + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd8, offset 0x683 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd9, offset 0x687 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, 
hi: 0xbf}, + // Block 0xda, offset 0x692 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xdb, offset 0x695 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x698 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xdd, offset 0x69b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x6a1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xdf, offset 0x6a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xe0, offset 0x6aa + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x6ad + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe2, offset 0x6b0 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe3, offset 0x6b3 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6b6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xe5, offset 0x6b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xe6, offset 0x6be + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xe7, offset 0x6c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe9, offset 0x6cf + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xea, offset 0x6de + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + 
{value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xeb, offset 0x6ea + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xec, offset 0x6ee + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xed, offset 0x6f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xef, offset 0x6fc + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf0, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x705 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xf2, offset 0x70e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xf3, offset 0x719 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf4, offset 0x71f + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xf5, offset 0x727 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0xf6, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf7, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xf8, offset 0x731 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xf9, offset 0x735 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, 
lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xfa, offset 0x73b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xfc, offset 0x746 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xfd, offset 0x749 + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xfe, offset 0x759 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xff, offset 0x760 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x100, offset 0x763 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0xbf}, + // Block 0x101, offset 0x766 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x102, offset 0x76a + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x103, offset 0x770 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x104, offset 0x775 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x105, offset 0x77a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x782 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x107, offset 0x787 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // 
Block 0x108, offset 0x78b + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x78f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10a, offset 0x792 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x10b, offset 0x795 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x10c, offset 0x799 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x10d, offset 0x79d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x10e, offset 0x7a0 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x10f, offset 0x7b0 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x110, offset 0x7c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x111, offset 0x7c6 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x112, offset 0x7c8 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x7ca + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42466 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go new file mode 100644 index 000000000..8b65fa167 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -0,0 +1,4486 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." 
+ + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + 
"\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" 
+ + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 124: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 124 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 126 blocks, 8064 entries, 16128 bytes +// The third block is the zero block. 
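+// For block handles n < 124, lookupValue reads idnaValues[n<<6+uint32(b)]
+// directly; larger handles are resolved through idnaSparse after subtracting
+// 124. As a minimal usage sketch (assumed for illustration, not part of the
+// generated tables), a caller would normally walk the input one rune at a
+// time, treating a returned size of 0 as an incomplete encoding:
+//
+//	t := newIdnaTrie(0)
+//	for len(s) > 0 {
+//		v, sz := t.lookupString(s)
+//		if sz == 0 { // s ends in an incomplete UTF-8 encoding
+//			break
+//		}
+//		handle(v, sz) // handle is a hypothetical consumer of the trie value
+//		s = s[sz:]
+//	}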
+var idnaValues = [8064]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308, + 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008, + 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308, + 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308, + 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1, + 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308, + 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008, + 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008, + // Block 0x16, offset 0x580 + 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008, + 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008, + 
0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008, + 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008, + 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008, + 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040, + 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040, + 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008, + 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1, + 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018, + 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018, + 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040, + // Block 0x18, offset 0x600 + 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040, + 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008, + 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308, + 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040, + 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040, + 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308, + 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008, + 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008, + 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040, + 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040, + 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040, + 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 
0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9, + 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018, + 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040, + 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040, + 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040, + 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308, + 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040, + 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040, + 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018, + 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018, + // Block 0x20, offset 0x800 + 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008, + 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308, + // Block 0x21, offset 0x840 + 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040, + 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040, + 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040, + 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008, + 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018, + 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018, + 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008, + 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040, + 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040, + 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008, + 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308, + 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308, + 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x0a09, 
0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040, + 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59, + 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008, + // Block 0x25, offset 0x940 + 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018, + 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308, + 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308, + 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11, + 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308, + 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308, + 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308, + 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008, + 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41, + 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008, + 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1, + 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011, + 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041, + 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9, + 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099, + 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269, + 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1, + 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 
0x0008, + 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008, + 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008, + 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169, + 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9, + 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251, + 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9, + 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359, + 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1, + 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008, + 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008, + 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008, + 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008, + 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045, + 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 
0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008, + 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045, + 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008, + 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045, + 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045, + 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489, + 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1, + 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040, + // Block 0x2c, offset 0xb00 + 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1, + 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591, + 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1, + 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1, + 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771, + 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891, + 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831, + 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951, + 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459, + 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040, + 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489, + 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2, + 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa, + 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9, + 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a, + 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0, + 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018, + 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a, + 0xbb0: 
0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018, + 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018, + 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018, + 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018, + 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340, + 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340, + 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61, + 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd, + 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71, + // Block 0x30, offset 0xc00 + 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61, + 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5, + 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09, + 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359, + 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040, + 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018, + 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018, + 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040, + // Block 0x31, offset 0xc40 + 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e, + 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249, + 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41, + 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018, + 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269, + 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018, + 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018, + 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09, + 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9, + 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd, + 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109, + // Block 0x32, offset 0xc80 + 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9, + 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018, + 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 
0xc90: 0x2121, 0xc91: 0x2151, + 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279, + 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399, + 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439, + 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369, + 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61, + 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451, + 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5, + 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018, + 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040, + 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51, + 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601, + 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691, + 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26, + 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6, + 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a, + // Block 0x34, offset 0xd00 + 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a, + 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46, + 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06, + 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6, + 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86, + 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46, + 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199, + 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259, + // Block 0x35, offset 0xd40 + 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99, + 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089, + 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9, + 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249, + 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71, + 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9, + 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1, + 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018, + 0xd70: 0x0018, 0xd71: 
0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018, + 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018, + 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018, + // Block 0x36, offset 0xd80 + 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008, + 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008, + 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008, + 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008, + 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008, + 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd, + 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d, + 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9, + 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d, + 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008, + 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9, + // Block 0x37, offset 0xdc0 + 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008, + 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008, + 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008, + 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008, + 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008, + 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008, + 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018, + 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308, + 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d, + 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d, + 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d, + 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040, + 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040, + 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040, + 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040, + 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040, + 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040, + 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008, + 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018, + 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 
0xe51: 0x0018, + 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018, + 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018, + 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018, + 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018, + 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018, + 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018, + 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd, + 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd, + 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d, + 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d, + 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d, + 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd, + 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d, + 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd, + 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d, + 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd, + 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd, + 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d, + 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd, + 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d, + 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008, + 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008, + 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008, + 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008, + 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040, + 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd, + 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018, + 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761, + 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1, + 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881, + 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd, + 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d, + 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d, + 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 
0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd, + 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d, + 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d, + 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d, + 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd, + 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd, + 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d, + 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d, + 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd, + 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d, + 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999, + 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29, + 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69, + 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69, + 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15, + 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75, + 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded, + 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d, + 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5, + 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d, + 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d, + 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd, + 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9, + 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1, + 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9, + 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549, + 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1, + 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11, + 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91, + 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9, + 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011, + 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209, + 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361, + // Block 0x40, offset 0x1000 + 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541, + 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781, + 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 
0x1011: 0x5979, + 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89, + 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1, + 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99, + 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9, + 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9, + 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069, + 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9, + 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9, + // Block 0x41, offset 0x1040 + 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271, + 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9, + 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed, + 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371, + 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9, + 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d, + 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211, + 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1, + 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599, + 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9, + 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611, + // Block 0x42, offset 0x1080 + 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671, + 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709, + 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781, + 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1, + 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811, + 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901, + 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1, + 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11, + 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31, + 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51, + 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 
0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 
0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040, + 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040, + 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040, + // Block 0x48, offset 0x1200 + 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575, + 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635, + 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008, + 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715, + 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5, + 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008, + 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008, + 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935, + 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5, + 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5, + 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35, + // Block 0x49, offset 0x1240 + 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35, + 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5, + 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19, + 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91, + 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040, + 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040, + 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040, + 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040, + 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040, + 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040, + 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001, + 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 
0x0040, 0x128a: 0x0040, 0x128b: 0x0040, + 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040, + 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9, + 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1, + 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149, + 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2, + 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1, + 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1, + 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479, + 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040, + 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659, + 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721, + 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751, + 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769, + 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799, + 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1, + 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1, + 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9, + 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829, + 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871, + 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9, + 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9, + 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919, + 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931, + 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961, + 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991, + 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1, + 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818, + 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818, + 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818, + // Block 0x4d, offset 0x1340 + 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040, + 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040, + 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040, + 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09, + 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 
0x135d: 0x0479, + 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81, + 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1, + 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19, + 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91, + 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1, + 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09, + // Block 0x4e, offset 0x1380 + 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1, + 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1, + 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1, + 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991, + 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81, + 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a, + 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99, + 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89, + 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79, + 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19, + 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649, + 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9, + 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49, + 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21, + 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9, + 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01, + 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91, + 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9, + 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171, + 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289, + 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329, + // Block 0x50, offset 0x1400 + 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1, + 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621, + 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739, + 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1, + 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9, + 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29, + 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079, + 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1, + 0x1430: 
0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171, + 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261, + 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301, + // Block 0x51, offset 0x1440 + 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1, + 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1, + 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171, + 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261, + 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351, + 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441, + 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509, + 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1, + 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081, + 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239, + 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018, + // Block 0x52, offset 0x1480 + 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040, + 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609, + 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721, + 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839, + 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919, + 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9, + 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9, + 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9, + 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1, + 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79, + // Block 0x53, offset 0x14c0 + 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989, + 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040, + 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040, + 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040, + 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040, + 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040, + 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9, + 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12, + 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040, + // Block 0x54, offset 0x1500 + 0x1500: 0x33c0, 0x1501: 0x33c0, 
0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0, + 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0, + 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55, + 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75, + 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308, + 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308, + 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308, + 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2, + 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35, + 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55, + // Block 0x55, offset 0x1540 + 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018, + 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56, + 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95, + 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa, + 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95, + 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99, + 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda, + 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040, + 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040, + 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081, + 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141, + 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171, + 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1, + 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1, + 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201, + 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219, + 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249, + 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291, + 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1, + 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9, + 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321, + 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339, + 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369, + 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 
0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381, + 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1, + 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9, + 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9, + 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1, + 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441, + 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9, + 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0, + // Block 0x58, offset 0x1600 + 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea, + 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2, + 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9, + 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81, + 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2, + 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159, + 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41, + 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9, + 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9, + 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a, + 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a, + // Block 0x59, offset 0x1640 + 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09, + 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51, + 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039, + 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279, + 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a, + 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115, + 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5, + 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295, + 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355, + 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415, + 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215, + // Block 0x5a, offset 0x1680 + 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515, + 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595, + 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5, + 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655, + 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115, + 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735, + 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 
0x16a9: 0x87b5, + 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5, + 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5, + 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5, + 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5, + 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715, + 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040, + 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935, + 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040, + 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6, + 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35, + 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040, + 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040, + 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08, + 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808, + 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08, + 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908, + 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08, + 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808, + 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040, + 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18, + 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818, + 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08, + 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08, + 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08, + 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040, + 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040, + 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18, + 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818, + 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040, + 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008, + 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008, + 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040, + 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008, + 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008, + 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008, + 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308, + 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040, + 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040, + 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199, + 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359, + 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269, + 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369, + 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9, + 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259, + 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99, + 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089, + 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9, + 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249, + 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359, + // Block 0x61, offset 0x1840 + 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269, + 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369, + 0x184c: 0x0289, 0x184d: 0x13d1, 
0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9, + 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259, + 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99, + 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089, + 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9, + 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249, + 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71, + 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9, + 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369, + // Block 0x62, offset 0x1880 + 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9, + 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259, + 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99, + 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089, + 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040, + 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71, + 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9, + 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1, + 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199, + 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99, + 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089, + 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9, + 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249, + 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71, + 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9, + 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1, + 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199, + 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359, + 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269, + 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089, + // Block 0x64, offset 0x1900 + 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9, + 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71, + 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9, + 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040, + 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 
0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199, + 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359, + 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269, + 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369, + 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9, + 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040, + 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9, + 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040, + 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199, + 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359, + 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269, + 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369, + 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9, + 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259, + 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99, + 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9, + // Block 0x66, offset 0x1980 + 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1, + 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199, + 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359, + 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269, + 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369, + 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9, + 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259, + 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99, + 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089, + 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9, + 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359, + 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269, + 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369, + 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9, + 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259, + 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99, + 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089, + 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9, + 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 
0x19f5: 0x0249, + 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71, + 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369, + 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9, + 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259, + 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99, + 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089, + 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9, + 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249, + 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71, + 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9, + 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1, + 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259, + 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99, + 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089, + 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9, + 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249, + 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71, + 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9, + 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1, + 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199, + 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359, + 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089, + 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9, + 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249, + 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71, + 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9, + 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1, + 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099, + 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429, + 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71, + 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9, + 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9, + 0x1ac6: 
0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11, + 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109, + 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1, + 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429, + 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099, + 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429, + 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71, + 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9, + 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01, + 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11, + 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109, + 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1, + 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429, + 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099, + 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429, + 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71, + 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9, + 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01, + 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1, + 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109, + 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1, + 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429, + 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099, + 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429, + 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71, + 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9, + 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01, + 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1, + 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41, + 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1, + 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429, + 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099, + 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429, + 0x1b98: 0x1a31, 0x1b99: 0xbb11, 
0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71, + 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9, + 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01, + 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1, + 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41, + 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1, + 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429, + 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079, + 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1, + 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61, + 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9, + 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81, + 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079, + 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1, + 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61, + 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411, + 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1, + 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231, + 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949, + 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231, + 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949, + 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411, + 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231, + 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249, + 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02, + 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2, + 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72, + 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32, + 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2, + 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2, + 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040, + 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199, + 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359, + 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089, + 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1, + 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289, + 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349, + 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409, + 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9, + 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589, + 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649, + 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709, + 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79, + 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39, + 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9, + 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39, + 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9, + 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79, + 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39, + 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9, + 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059, + 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9, + 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239, + 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9, + 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399, + 0x1e12: 
0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459, + 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309, + 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559, + 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9, + 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679, + 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9, + 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d, + 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9, + 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959, + 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d, + 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d, + 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9, + 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99, + 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9, + 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9, + 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99, + 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39, + 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639, + 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9, + 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d, + 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9, + 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d, + 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd, + 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979, + 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19, + 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d, + 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d, + 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99, + 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39, + 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9, + 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39, + 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd, + 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19, + 0x1ee4: 0xee39, 0x1ee5: 0x925d, 
0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9, + 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59, + 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd, + 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d, + 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d, + 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d, + 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879, + 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919, + 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd, + 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9, + 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99, + 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39, + 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9, + 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d, + 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19, + 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9, + 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59, + 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9, + 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d, + 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040, + 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040, + 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040, + 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040, + 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3, + 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b, + // Block 0x6, offset 0x180 + 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca, + 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1, + 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6, + 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2, + 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 
0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9, + 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1, + 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 
0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b, + 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51, + 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f, + 0x408: 0x130, 0x409: 0xba, 
0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138, + 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 
0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba, + 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a, + 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 
0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168, + 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c, + 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 
0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba, + // Block 0x1d, offset 0x740 + 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba, + 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba, + 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba, + 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba, + 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a, + 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d, + 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba, + 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba, + 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 
0xba, + 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba, + 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba, + 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba, + 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 258 entries, 516 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a} + +// idnaSparseValues: 1869 entries, 7476 bytes +var idnaSparseValues = [1869]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 
0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 
0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0c08, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x85}, + {value: 0x0c08, lo: 0x86, hi: 0x87}, + {value: 0x0a08, lo: 0x88, hi: 0x88}, + {value: 0x0c08, lo: 0x89, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0x93}, + {value: 0x0c08, lo: 0x94, hi: 0x94}, + {value: 0x0a08, lo: 0x95, hi: 0x95}, + {value: 0x0808, lo: 0x96, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x93 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0x10, offset 0x98 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa1 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + 
{value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xd8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x16, offset 0xe9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x17, offset 0xf3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x18, offset 0xfa + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xbf}, + // Block 0x19, offset 0x107 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x1a, offset 0x118 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1e, offset 0x147 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1f, offset 0x151 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x20, offset 0x153 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x21, offset 0x158 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x22, offset 0x15b + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, 
hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x23, offset 0x15e + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x24, offset 0x160 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x25, offset 0x16c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x26, offset 0x177 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x17f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x28, offset 0x185 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x29, offset 0x18b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2a, offset 0x190 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2b, offset 0x195 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2c, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2d, offset 0x19c + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2e, offset 0x1a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2f, offset 0x1a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + 
{value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x30, offset 0x1b3 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x31, offset 0x1bd + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x32, offset 0x1c3 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x33, offset 0x1d4 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x34, offset 0x1de + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x35, offset 0x1e1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x36, offset 0x1e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x37, offset 0x1ec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x38, offset 0x1f9 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x39, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x3a, offset 0x205 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 
0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3b, offset 0x20c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x214 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x224 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x230 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3f, offset 0x232 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23c + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x41, offset 0x248 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x42, offset 0x254 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 
0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x43, offset 0x260 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x44, offset 0x268 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x46, offset 0x277 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x47, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x48, offset 0x28c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x49, offset 0x297 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x4a, offset 0x29b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4b, offset 0x2a4 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4c, offset 0x2ac + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x2b2 + {value: 0x0000, lo: 0x04}, + 
{value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4e, offset 0x2b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4f, offset 0x2ba + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x50, offset 0x2bd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x51, offset 0x2c1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x52, offset 0x2c7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x53, offset 0x2cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x54, offset 0x2cf + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2dc + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2e2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x5a, offset 0x2fc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x306 + {value: 0x0000, lo: 0x03}, 
+ {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5c, offset 0x30a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5d, offset 0x30d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5e, offset 0x313 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5f, offset 0x317 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x319 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x61, offset 0x31c + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x62, offset 0x31e + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x63, offset 0x321 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x32e + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x66, offset 0x33d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x341 + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x68, offset 0x346 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x69, offset 0x349 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x6a, offset 0x34d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x6b, offset 0x352 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6c, offset 0x357 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 
0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6f, offset 0x372 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x70, offset 0x378 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x71, offset 0x37c + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x38b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x75, offset 0x3a2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x3ad + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 
0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3b5 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3c6 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x79, offset 0x3cf + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x7a, offset 0x3df + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ec + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 
0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7e, offset 0x408 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7f, offset 0x40c + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x83, offset 0x419 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x84, offset 0x41d + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x85, offset 0x426 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x86, offset 0x42c + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x430 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x440 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x89, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x8a, offset 0x44f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x452 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8c, offset 0x458 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 
0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8d, offset 0x45f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8e, offset 0x464 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 0x468 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x90, offset 0x46e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x91, offset 0x473 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x92, offset 0x47c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x93, offset 0x481 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x94, offset 0x487 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x95, offset 0x48e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x96, offset 0x495 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x49c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x4a5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x9a, offset 0x4a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, 
lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x4b9 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9d, offset 0x4bf + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x4c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9f, offset 0x4cb + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x4d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0xa1, offset 0x4d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa2, offset 0x4dc + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa3, offset 0x4ec + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa4, offset 0x4f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x4f7 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa6, offset 0x4fb + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa7, offset 0x502 + {value: 0x0000, 
lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa9, offset 0x507 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xaa, offset 0x50a + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xab, offset 0x50e + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x512 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x518 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xae, offset 0x521 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x52d + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb0, offset 0x534 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb1, offset 0x53d + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb2, offset 0x545 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x54c + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb4, offset 0x55a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x567 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb6, offset 0x574 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb8, offset 0x581 + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb9, offset 0x58f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x597 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbb, offset 0x5a2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5ab + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 
0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbd, offset 0x5b1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5b9 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbf, offset 0x5c2 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc0, offset 0x5cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc1, offset 0x5cf + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc2, offset 0x5db + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5e3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc5, offset 0x5e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f0 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc7, offset 0x5f9 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, 
hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc8, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc9, offset 0x608 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xca, offset 0x60d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xcb, offset 0x610 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcc, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcd, offset 0x616 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xce, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcf, offset 0x624 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd0, offset 0x628 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd1, offset 0x633 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd2, offset 0x636 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x63c + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd4, offset 0x641 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd5, offset 0x645 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd6, offset 0x648 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd7, offset 0x64b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd8, offset 0x64e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd9, offset 0x653 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 
0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xda, offset 0x65d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xdb, offset 0x660 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xdc, offset 0x664 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xdd, offset 0x673 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x67f + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdf, offset 0x683 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe0, offset 0x688 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x68d + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x691 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe3, offset 0x696 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x69f + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe5, offset 0x6aa + {value: 
0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe6, offset 0x6b0 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xea, offset 0x6c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xeb, offset 0x6cc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xec, offset 0x6d1 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xed, offset 0x6d4 + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xee, offset 0x6e2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xef, offset 0x6e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf0, offset 0x6ec + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf1, offset 0x6ef + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf2, offset 0x6f3 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf3, offset 0x6f9 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf4, 
offset 0x6fe + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf5, offset 0x708 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf6, offset 0x70d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf7, offset 0x710 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf8, offset 0x713 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf9, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xfa, offset 0x719 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xfb, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfc, offset 0x720 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfd, offset 0x730 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xff, offset 0x746 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x100, offset 0x748 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x101, offset 0x74a + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41662 bytes (40KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 000000000..c4ef847e7 --- 
/dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 000000000..7a8cf889b --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. 
+// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. + + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 000000000..685f0e7ea --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. 
+ CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. 
+func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 000000000..c646a6952 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 000000000..9bf4286c7 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 000000000..3ebf6f2da --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 000000000..fa139db22 --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - tip + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 000000000..dfbed62cf --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 000000000..0f443e693 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,35 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + +## Policy for new packages + +We no longer accept new provider-specific packages in this repo. For +defining provider endpoints and provider-specific OAuth2 behavior, we +encourage you to create packages elsewhere. We'll keep the existing +packages for compatibility. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. 
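The README above defers to godoc for usage, so here is a minimal sketch of the standard three-legged flow with oauth2.Config. Every credential, endpoint URL, redirect URL, and scope in it is a placeholder for illustration, not a value taken from this repository.

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Placeholder values: a real program uses the client ID/secret and
	// endpoint URLs issued by its OAuth2 provider.
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",
		ClientSecret: "YOUR_CLIENT_SECRET",
		Scopes:       []string{"profile"},
		RedirectURL:  "http://localhost:8080/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/o/oauth2/token",
		},
	}

	// Send the user to the consent page, then exchange the returned
	// authorization code for a token.
	url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Println("Visit the URL for the auth dialog:", url)

	var code string
	if _, err := fmt.Scan(&code); err != nil {
		panic(err)
	}
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		panic(err)
	}

	// conf.Client returns an *http.Client that attaches the token to each
	// request and refreshes it when it expires.
	client := conf.Client(ctx, tok)
	_ = client
}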
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod new file mode 100644 index 000000000..b34578155 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.mod @@ -0,0 +1,10 @@ +module golang.org/x/oauth2 + +go 1.11 + +require ( + cloud.google.com/go v0.34.0 + golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + google.golang.org/appengine v1.4.0 +) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum new file mode 100644 index 000000000..6f0079b0d --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.sum @@ -0,0 +1,12 @@ +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 000000000..feb1157b1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "time" + + "golang.org/x/oauth2" +) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. +// +// First generation App Engine runtimes (<= Go 1.9): +// AppEngineTokenSource returns a token source that fetches tokens issued to the +// current App Engine application's service account. The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. 
It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + return appEngineTokenSource(ctx, scope...) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go new file mode 100644 index 000000000..83dacac32 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// This file applies to App Engine first generation runtimes (<= Go 1.9). + +package google + +import ( + "context" + "sort" + "strings" + "sync" + + "golang.org/x/oauth2" + "google.golang.org/appengine" +) + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &gaeTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 000000000..04c2c2216 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. 
Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 000000000..ad2c09236 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. 
+ filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. + if appengineTokenFunc != nil { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource("", scopes...), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 000000000..73be62903 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. 
Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful is some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 000000000..81de32b36 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,209 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + AuthStyle: oauth2.AuthStyleInParams, +} + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://oauth2.googleapis.com/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// If no scopes are specified, a set of default scopes are automatically granted. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) +} + +type computeSource struct { + account string + scopes []string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenURI := "instance/service-accounts/" + acct + "/token" + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI = tokenURI + "?" + v.Encode() + } + tokenJSON, err := metadata.Get(tokenURI) + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + tok := &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000..b0fdb3a88 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
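+// Illustrative usage sketch (hypothetical, not part of the upstream vendored
+// file): minting self-signed JWT access tokens for a single Google service.
+// jsonKey is assumed to hold a service account key and aud the service's
+// audience URL; both are supplied by the caller.
+func exampleJWTAccessToken(jsonKey []byte, aud string) (*oauth2.Token, error) {
+	ts, err := JWTAccessTokenSourceFromJSON(jsonKey, aud)
+	if err != nil {
+		return nil, err
+	}
+	// The source is wrapped in ReuseTokenSource, so Token returns the cached
+	// JWT until it expires and only then signs a new one.
+	return ts.Token()
+}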
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 000000000..456224bc7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
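+// Illustrative usage sketch (hypothetical, not part of the upstream vendored
+// file): reusing whatever account `gcloud auth login` already authorized.
+// Passing an empty account name selects the SDK's currently active account.
+func exampleSDKClient(ctx context.Context) (*http.Client, error) {
+	cfg, err := NewSDKConfig("")
+	if err != nil {
+		return nil, err
+	}
+	// The client refreshes the token as needed, but the refreshed token is
+	// not written back to the gcloud credentials file.
+	return cfg.Client(ctx), nil
+}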
+func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. 
+func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go new file mode 100644 index 000000000..743487188 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import "google.golang.org/appengine/urlfetch" + +func init() { + appengineClientHook = urlfetch.Client +} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 000000000..03265e888 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 000000000..c0ab196cf --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
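+// Illustrative usage sketch (hypothetical, not part of the upstream vendored
+// file): keyBytes may hold either a PEM block (e.g. "-----BEGIN PRIVATE KEY-----")
+// or raw PKCS#8/PKCS#1 DER; ParseKey accepts both, but not encrypted PEM.
+func exampleParseKey(keyBytes []byte) (*rsa.PrivateKey, error) {
+	return ParseKey(keyBytes)
+}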
+func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 000000000..355c38696 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,294 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context/ctxhttp" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + if len(b) == 0 || string(b) == "null" { + return nil + } + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + if i > math.MaxInt32 { + i = math.MaxInt32 + } + *e = expirationTime(i) + return nil +} + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. 
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. +type AuthStyle int + +const ( + AuthStyleUnknown AuthStyle = 0 + AuthStyleInParams AuthStyle = 1 + AuthStyleInHeader AuthStyle = 2 +) + +// authStyleCache is the set of tokenURLs we've successfully used via +// RetrieveToken and which style auth we ended up using. +// It's called a cache, but it doesn't (yet?) shrink. It's expected that +// the set of OAuth2 servers a program contacts over time is fixed and +// small. +var authStyleCache struct { + sync.Mutex + m map[string]AuthStyle // keyed by tokenURL +} + +// ResetAuthCache resets the global authentication style cache used +// for AuthStyleUnknown token requests. +func ResetAuthCache() { + authStyleCache.Lock() + defer authStyleCache.Unlock() + authStyleCache.m = nil +} + +// lookupAuthStyle reports which auth style we last used with tokenURL +// when calling RetrieveToken and whether we have ever done so. +func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + style, ok = authStyleCache.m[tokenURL] + return +} + +// setAuthStyle adds an entry to authStyleCache, documented above. +func setAuthStyle(tokenURL string, v AuthStyle) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + if authStyleCache.m == nil { + authStyleCache.m = make(map[string]AuthStyle) + } + authStyleCache.m[tokenURL] = v +} + +// newTokenRequest returns a new *http.Request to retrieve a new token +// from tokenURL using the provided clientID, clientSecret, and POST +// body parameters. +// +// inParams is whether the clientID & clientSecret should be encoded +// as the POST body. An 'inParams' value of true means to send it in +// the POST body (along with any values in v); false means to send it +// in the Authorization header. +func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { + if authStyle == AuthStyleInParams { + v = cloneURLValues(v) + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if authStyle == AuthStyleInHeader { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + return req, nil +} + +func cloneURLValues(v url.Values) url.Values { + v2 := make(url.Values, len(v)) + for k, vv := range v { + v2[k] = append([]string(nil), vv...) + } + return v2 +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { + needsAuthStyleProbe := authStyle == 0 + if needsAuthStyleProbe { + if style, ok := lookupAuthStyle(tokenURL); ok { + authStyle = style + needsAuthStyleProbe = false + } else { + authStyle = AuthStyleInHeader // the first way we'll try + } + } + req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + if err != nil { + return nil, err + } + token, err := doTokenRoundTrip(ctx, req) + if err != nil && needsAuthStyleProbe { + // If we get an error, assume the server wants the + // clientID & clientSecret in a different form. + // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
+ // In summary: + // - Reddit only accepts client secret in the Authorization header + // - Dropbox accepts either it in URL param or Auth header, but not both. + // - Google only accepts URL param (not spec compliant?), not Auth header + // - Stripe only accepts client secret in Auth header with Bearer method, not Basic + // + // We used to maintain a big table in this code of all the sites and which way + // they went, but maintaining it didn't scale & got annoying. + // So just try both ways. + authStyle = AuthStyleInParams // the second way we'll try + req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + token, err = doTokenRoundTrip(ctx, req) + } + if needsAuthStyleProbe && err == nil { + setAuthStyle(tokenURL, authStyle) + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token != nil && token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, err +} + +func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + r.Body.Close() + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil +} + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 000000000..572074a63 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "net/http" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. 
It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +var appengineClientHook func(context.Context) *http.Client + +func ContextClient(ctx context.Context) *http.Client { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc + } + } + if appengineClientHook != nil { + return appengineClientHook(ctx) + } + return http.DefaultClient +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 000000000..683d2d271 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. +package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. 
+ prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 000000000..b2bf18298 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. 
Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 000000000..291df5c83 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,381 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "context" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. 
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint represents an OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle AuthStyle +} + +// AuthStyle represents how requests for tokens are authenticated +// to the server. +type AuthStyle int + +const ( + // AuthStyleAutoDetect means to auto-detect which authentication + // style the provider wants by trying both ways and caching + // the successful way for the future. + AuthStyleAutoDetect AuthStyle = 0 + + // AuthStyleInParams sends the "client_id" and "client_secret" + // in the POST body as application/x-www-form-urlencoded parameters. + AuthStyleInParams AuthStyle = 1 + + // AuthStyleInHeader sends the client_id and client_password + // using HTTP Basic Authorization. This is an optional style + // described in the OAuth2 RFC 6749 section 2.3.1. + AuthStyleInHeader AuthStyle = 2 +) + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. 
+func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-empty string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +// It can also be used to pass the PKCE challenge. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + if state != "" { + // TODO(light): Docs say never to omit state; don't allow empty. + v.Set("state", state) + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + v := url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +// +// Opts may include the PKCE verifier code if previously used in AuthCodeURL. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveToken(ctx, c, v) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. 
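+// Illustrative usage sketch (hypothetical, not part of the upstream vendored
+// file): the usual three-legged sequence around AuthCodeURL, Exchange and
+// Client. The *Config, the "state" literal and the code parameter are
+// placeholders supplied by the caller; real code must generate state per
+// request and validate it in the redirect handler.
+func exampleThreeLegged(ctx context.Context, conf *Config, code string) (*http.Client, error) {
+	// Step 1: send the user to the provider's consent page; AccessTypeOffline
+	// asks for a refresh token on the first exchange.
+	authURL := conf.AuthCodeURL("state", AccessTypeOffline)
+	_ = authURL // typically written into an HTTP redirect
+
+	// Step 2: after the redirect back, validate the state parameter and
+	// exchange the authorization code for a token.
+	tok, err := conf.Exchange(ctx, code)
+	if err != nil {
+		return nil, err
+	}
+
+	// Step 3: use the token; the returned client refreshes it as needed.
+	return conf.Client(ctx, tok), nil
+}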
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. 
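+// Illustrative usage sketch (hypothetical, not part of the upstream vendored
+// file): wrapping an access token obtained out of band (for example, a
+// personal access token) in a client via StaticTokenSource. The token value
+// is assumed to be supplied by the caller and to never expire.
+func exampleStaticClient(ctx context.Context, accessToken string) *http.Client {
+	ts := StaticTokenSource(&Token{AccessToken: accessToken})
+	return NewClient(ctx, ts)
+}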
+func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + return internal.ContextClient(ctx) + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextClient(ctx).Transport, + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 000000000..822720341 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,178 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". 
+func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// timeNow is time.Now but pulled out as a variable for tests. +var timeNow = time.Now + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*RetrieveError)(rErr) + } + return nil, err + } + return tokenFromInternal(tk), nil +} + +// RetrieveError is the error returned when the token endpoint returns a +// non-2XX HTTP status code. +type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. 
+ Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 000000000..aa0d34f1e --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "io" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token from Transport's Source. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + + // req.Body is assumed to have been closed by the base RoundTripper. + reqBodyClosed = true + + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *Transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s new file mode 100644 index 000000000..06f84b855 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// +// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go +// + +TEXT ·syscall6(SB),NOSPLIT,$0-88 + JMP syscall·syscall6(SB) + +TEXT ·rawSyscall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go new file mode 100644 index 000000000..da6b9e436 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/byteorder.go @@ -0,0 +1,30 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "encoding/binary" + "runtime" +) + +// hostByteOrder returns binary.LittleEndian on little-endian machines and +// binary.BigEndian on big-endian machines. +func hostByteOrder() binary.ByteOrder { + switch runtime.GOARCH { + case "386", "amd64", "amd64p32", + "arm", "arm64", + "mipsle", "mips64le", "mips64p32le", + "ppc64le", + "riscv", "riscv64": + return binary.LittleEndian + case "armbe", "arm64be", + "mips", "mips64", "mips64p32", + "ppc", "ppc64", + "s390", "s390x", + "sparc", "sparc64": + return binary.BigEndian + } + panic("unknown architecture") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go new file mode 100644 index 000000000..679e78c2c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -0,0 +1,126 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cpu implements processor feature detection for +// various CPU architectures. +package cpu + +// Initialized reports whether the CPU features were initialized. +// +// For some GOOS/GOARCH combinations initialization of the CPU features depends +// on reading an operating specific file, e.g. 
/proc/self/auxv on linux/arm +// Initialized will report false if reading the file fails. +var Initialized bool + +// CacheLinePad is used to pad structs to avoid false sharing. +type CacheLinePad struct{ _ [cacheLineSize]byte } + +// X86 contains the supported CPU features of the +// current X86/AMD64 platform. If the current platform +// is not X86/AMD64 then all feature flags are false. +// +// X86 is padded to avoid false sharing. Further the HasAVX +// and HasAVX2 are only set if the OS supports XMM and YMM +// registers in addition to the CPUID feature bit being set. +var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. 
+// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. +var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go new file mode 100644 index 000000000..be6027224 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix,ppc64 + +package cpu + +const cacheLineSize = 128 + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func init() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 000000000..7f2348b7d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 000000000..568bcd031 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return true }
+
+// The following feature detection functions are defined in cpu_s390x.s.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
new file mode 100644
index 000000000..f7cb46971
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build !gccgo
+
+package cpu
+
+// cpuid is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
new file mode 100644
index 000000000..e363c7d13
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+//
+// TODO: Replace with a better alternative:
+//
+//     #include <immintrin.h>
+//
+//     #pragma GCC target("xsave")
+//
+//     void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
+//       unsigned long long x = _xgetbv(0);
+//       *eax = x & 0xffffffff;
+//       *edx = (x >> 32) & 0xffffffff;
+//     }
+//
+// Note that _xgetbv is defined starting with GCC 8.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+	__asm("  xorl %%ecx, %%ecx\n"
+	      "  xgetbv"
+	      : "=a"(*eax), "=d"(*edx));
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
new file mode 100644
index 000000000..ba49b91bd
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build 386 amd64 amd64p32 +// +build gccgo + +package cpu + +//extern gccgoGetCpuidCount +func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) + +func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { + var a, b, c, d uint32 + gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) + return a, b, c, d +} + +//extern gccgoXgetbv +func gccgoXgetbv(eax, edx *uint32) + +func xgetbv() (eax, edx uint32) { + var a, d uint32 + gccgoXgetbv(&a, &d) + return a, d +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go new file mode 100644 index 000000000..aa986f778 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return false } + +// TODO(mundaym): the following feature detection functions are currently +// stubs. See https://golang.org/cl/162887 for how to fix this. +// They are likely to be expensive to call so the results should be cached. +func stfle() facilityList { panic("not implemented for gccgo") } +func kmQuery() queryResult { panic("not implemented for gccgo") } +func kmcQuery() queryResult { panic("not implemented for gccgo") } +func kmctrQuery() queryResult { panic("not implemented for gccgo") } +func kmaQuery() queryResult { panic("not implemented for gccgo") } +func kimdQuery() queryResult { panic("not implemented for gccgo") } +func klmdQuery() queryResult { panic("not implemented for gccgo") } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go new file mode 100644 index 000000000..76b5f507f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !amd64,!amd64p32,!386 + +package cpu + +import ( + "io/ioutil" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func init() { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false + return + } + + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + doinit() + + Initialized = true +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go new file mode 100644 index 000000000..fa7fb1bd7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 64 + +// HWCAP/HWCAP2 bits. These are exposed by Linux. +const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 000000000..6c8d975d4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 000000000..d579eaef4 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,161 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cpu + +const cacheLineSize = 256 + +const ( + // bit mask values from /usr/include/bits/hwcap.h + hwcap_ZARCH = 2 + hwcap_STFLE = 4 + hwcap_MSA = 8 + hwcap_LDISP = 16 + hwcap_EIMM = 32 + hwcap_DFP = 64 + hwcap_ETF3EH = 256 + hwcap_VX = 2048 + hwcap_VXE = 8192 +) + +// bitIsSet reports whether the bit at index is set. The bit index +// is in big endian order, so bit index 0 is the leftmost bit. +func bitIsSet(bits []uint64, index uint) bool { + return bits[index/64]&((1<<63)>>(index%64)) != 0 +} + +// function is the code for the named cryptographic function. +type function uint8 + +const ( + // KM{,A,C,CTR} function codes + aes128 function = 18 // AES-128 + aes192 function = 19 // AES-192 + aes256 function = 20 // AES-256 + + // K{I,L}MD function codes + sha1 function = 1 // SHA-1 + sha256 function = 2 // SHA-256 + sha512 function = 3 // SHA-512 + sha3_224 function = 32 // SHA3-224 + sha3_256 function = 33 // SHA3-256 + sha3_384 function = 34 // SHA3-384 + sha3_512 function = 35 // SHA3-512 + shake128 function = 36 // SHAKE-128 + shake256 function = 37 // SHAKE-256 + + // KLMD function codes + ghash function = 65 // GHASH +) + +// queryResult contains the result of a Query function +// call. Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type queryResult struct { + bits [2]uint64 +} + +// Has reports whether the given functions are present. +func (q *queryResult) Has(fns ...function) bool { + if len(fns) == 0 { + panic("no function codes provided") + } + for _, f := range fns { + if !bitIsSet(q.bits[:], uint(f)) { + return false + } + } + return true +} + +// facility is a bit index for the named facility. +type facility uint8 + +const ( + // cryptography facilities + msa4 facility = 77 // message-security-assist extension 4 + msa8 facility = 146 // message-security-assist extension 8 +) + +// facilityList contains the result of an STFLE call. +// Bits are numbered in big endian order so the +// leftmost bit (the MSB) is at index 0. +type facilityList struct { + bits [4]uint64 +} + +// Has reports whether the given facilities are present. +func (s *facilityList) Has(fs ...facility) bool { + if len(fs) == 0 { + panic("no facility bits provided") + } + for _, f := range fs { + if !bitIsSet(s.bits[:], uint(f)) { + return false + } + } + return true +} + +func doinit() { + // test HWCAP bit vector + has := func(featureMask uint) bool { + return hwCap&featureMask == featureMask + } + + // mandatory + S390X.HasZARCH = has(hwcap_ZARCH) + + // optional + S390X.HasSTFLE = has(hwcap_STFLE) + S390X.HasLDISP = has(hwcap_LDISP) + S390X.HasEIMM = has(hwcap_EIMM) + S390X.HasETF3EH = has(hwcap_ETF3EH) + S390X.HasDFP = has(hwcap_DFP) + S390X.HasMSA = has(hwcap_MSA) + S390X.HasVX = has(hwcap_VX) + if S390X.HasVX { + S390X.HasVXE = has(hwcap_VXE) + } + + // We need implementations of stfle, km and so on + // to detect cryptographic features. + if !haveAsmFunctions() { + return + } + + // optional cryptographic functions + if S390X.HasMSA { + aes := []function{aes128, aes192, aes256} + + // cipher message + km, kmc := kmQuery(), kmcQuery() + S390X.HasAES = km.Has(aes...) + S390X.HasAESCBC = kmc.Has(aes...) + if S390X.HasSTFLE { + facilities := stfle() + if facilities.Has(msa4) { + kmctr := kmctrQuery() + S390X.HasAESCTR = kmctr.Has(aes...) + } + if facilities.Has(msa8) { + kma := kmaQuery() + S390X.HasAESGCM = kma.Has(aes...) 
+ } + } + + // compute message digest + kimd := kimdQuery() // intermediate (no padding) + klmd := klmdQuery() // last (padding) + S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) + S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) + S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) + S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist + sha3 := []function{ + sha3_224, sha3_256, sha3_384, sha3_512, + shake128, shake256, + } + S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go new file mode 100644 index 000000000..f55e0c82c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go new file mode 100644 index 000000000..cda87b1a1 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips mipsle + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go new file mode 100644 index 000000000..dd1e76dc9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,arm64 + +package cpu + +const cacheLineSize = 64 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s new file mode 100644 index 000000000..e5037d92e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !gccgo + +#include "textflag.h" + +// func stfle() facilityList +TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 + MOVD $ret+0(FP), R1 + MOVD $3, R0 // last doubleword index to store + XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) + WORD $0xb2b01000 // store facility list extended (STFLE) + RET + +// func kmQuery() queryResult +TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KM-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92E0024 // cipher message (KM) + RET + +// func kmcQuery() queryResult +TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMC-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92F0024 // cipher message with chaining (KMC) + RET + +// func kmctrQuery() queryResult +TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMCTR-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB92D4024 // cipher message with counter (KMCTR) + RET + +// func kmaQuery() queryResult +TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KMA-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xb9296024 // cipher message with authentication (KMA) + RET + +// func kimdQuery() queryResult +TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KIMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93E0024 // compute intermediate message digest (KIMD) + RET + +// func klmdQuery() queryResult +TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 + MOVD $0, R0 // set function code to 0 (KLMD-Query) + MOVD $ret+0(FP), R1 // address of 16-byte return value + WORD $0xB93F0024 // compute last message digest (KLMD) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go new file mode 100644 index 000000000..bd9bbda0c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -0,0 +1,15 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build wasm + +package cpu + +// We're compiling the cpu package for an unknown (software-abstracted) CPU. +// Make CacheLinePad an empty struct and hope that the usual struct alignment +// rules are good enough. + +const cacheLineSize = 0 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go new file mode 100644 index 000000000..d70d317f5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 amd64 amd64p32 + +package cpu + +const cacheLineSize = 64 + +func init() { + Initialized = true + + maxID, _, _, _ := cpuid(0, 0) + + if maxID < 1 { + return + } + + _, _, ecx1, edx1 := cpuid(1, 0) + X86.HasSSE2 = isSet(26, edx1) + + X86.HasSSE3 = isSet(0, ecx1) + X86.HasPCLMULQDQ = isSet(1, ecx1) + X86.HasSSSE3 = isSet(9, ecx1) + X86.HasFMA = isSet(12, ecx1) + X86.HasSSE41 = isSet(19, ecx1) + X86.HasSSE42 = isSet(20, ecx1) + X86.HasPOPCNT = isSet(23, ecx1) + X86.HasAES = isSet(25, ecx1) + X86.HasOSXSAVE = isSet(27, ecx1) + X86.HasRDRAND = isSet(30, ecx1) + + osSupportsAVX := false + // For XGETBV, OSXSAVE bit is required and sufficient. 
+ if X86.HasOSXSAVE { + eax, _ := xgetbv() + // Check if XMM and YMM registers have OS support. + osSupportsAVX = isSet(1, eax) && isSet(2, eax) + } + + X86.HasAVX = isSet(28, ecx1) && osSupportsAVX + + if maxID < 7 { + return + } + + _, ebx7, _, _ := cpuid(7, 0) + X86.HasBMI1 = isSet(3, ebx7) + X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX + X86.HasBMI2 = isSet(8, ebx7) + X86.HasERMS = isSet(9, ebx7) + X86.HasRDSEED = isSet(18, ebx7) + X86.HasADX = isSet(19, ebx7) +} + +func isSet(bitpos uint, value uint32) bool { + return value&(1<